/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
struct map_lookup {
        u64 type;
        int io_align;
        int io_width;
        int stripe_len;
        int sector_size;
        int num_stripes;
        int sub_stripes;
        struct btrfs_bio_stripe stripes[];
};

static int init_first_rw_device(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
                            (sizeof(struct btrfs_bio_stripe) * (n)))

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
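
/*
 * Example: for a two-stripe mapping (e.g. RAID1 across two devices),
 * map_lookup_size(2) is sizeof(struct map_lookup) plus room for two
 * btrfs_bio_stripe entries in the flexible stripes[] array at the end,
 * which is how a single allocation covers both the header and the
 * per-stripe device/offset pairs.
 */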
void btrfs_lock_volumes(void)
{
        mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
        mutex_unlock(&uuid_mutex);
}
static void lock_chunks(struct btrfs_root *root)
{
        mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
        mutex_unlock(&root->fs_info->chunk_mutex);
}
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device;

        WARN_ON(fs_devices->opened);
        while (!list_empty(&fs_devices->devices)) {
                device = list_entry(fs_devices->devices.next,
                                    struct btrfs_device, dev_list);
                list_del(&device->dev_list);
                kfree(device->name);
                kfree(device);
        }
        kfree(fs_devices);
}
int btrfs_cleanup_fs_uuids(void)
{
        struct btrfs_fs_devices *fs_devices;

        while (!list_empty(&fs_uuids)) {
                fs_devices = list_entry(fs_uuids.next,
                                        struct btrfs_fs_devices, list);
                list_del(&fs_devices->list);
                free_fs_devices(fs_devices);
        }
        return 0;
}
static noinline struct btrfs_device *__find_device(struct list_head *head,
                                                   u64 devid, u8 *uuid)
{
        struct btrfs_device *dev;

        list_for_each_entry(dev, head, dev_list) {
                if (dev->devid == devid &&
                    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
                        return dev;
                }
        }
        return NULL;
}
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
        struct btrfs_fs_devices *fs_devices;

        list_for_each_entry(fs_devices, &fs_uuids, list) {
                if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
                        return fs_devices;
        }
        return NULL;
}
static void requeue_list(struct btrfs_pending_bios *pending_bios,
                         struct bio *head, struct bio *tail)
{
        struct bio *old_head;

        old_head = pending_bios->head;
        pending_bios->head = head;
        if (pending_bios->tail)
                tail->bi_next = old_head;
        else
                pending_bios->tail = tail;
}
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
        struct bio *pending;
        struct backing_dev_info *bdi;
        struct btrfs_fs_info *fs_info;
        struct btrfs_pending_bios *pending_bios;
        struct bio *tail;
        struct bio *cur;
        int again = 0;
        unsigned long num_run;
        unsigned long num_sync_run;
        unsigned long limit;
        unsigned long last_waited = 0;
        int force_reg = 0;

        bdi = blk_get_backing_dev_info(device->bdev);
        fs_info = device->dev_root->fs_info;
        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;

        /* we want to make sure that every time we switch from the sync
         * list to the normal list, we unplug
         */
        num_sync_run = 0;

loop:
        spin_lock(&device->io_lock);

loop_lock:
        num_run = 0;

        /* take all the bios off the list at once and process them
         * later on (without the lock held).  But, remember the
         * tail and other pointers so the bios can be properly reinserted
         * into the list if we hit congestion
         */
        if (!force_reg && device->pending_sync_bios.head) {
                pending_bios = &device->pending_sync_bios;
                force_reg = 1;
        } else {
                pending_bios = &device->pending_bios;
                force_reg = 0;
        }

        pending = pending_bios->head;
        tail = pending_bios->tail;
        WARN_ON(pending && !tail);

        /*
         * if pending was null this time around, no bios need processing
         * at all and we can stop.  Otherwise it'll loop back up again
         * and do an additional check so no bios are missed.
         *
         * device->running_pending is used to synchronize with the
         * schedule_bio code.
         */
        if (device->pending_sync_bios.head == NULL &&
            device->pending_bios.head == NULL) {
                again = 0;
                device->running_pending = 0;
        } else {
                again = 1;
                device->running_pending = 1;
        }

        pending_bios->head = NULL;
        pending_bios->tail = NULL;

        spin_unlock(&device->io_lock);

        /*
         * if we're doing the regular priority list, make sure we unplug
         * for any high prio bios we've sent down
         */
        if (pending_bios == &device->pending_bios && num_sync_run > 0) {
                num_sync_run = 0;
                blk_run_backing_dev(bdi, NULL);
        }

        while (pending) {
                /* we want to work on both lists, but do more bios on the
                 * sync list than the regular list
                 */
                if ((num_run > 32 &&
                     pending_bios != &device->pending_sync_bios &&
                     device->pending_sync_bios.head) ||
                    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
                     device->pending_bios.head)) {
                        spin_lock(&device->io_lock);
                        requeue_list(pending_bios, pending, tail);
                        goto loop_lock;
                }

                cur = pending;
                pending = pending->bi_next;
                cur->bi_next = NULL;
                atomic_dec(&fs_info->nr_async_bios);

                if (atomic_read(&fs_info->nr_async_bios) < limit &&
                    waitqueue_active(&fs_info->async_submit_wait))
                        wake_up(&fs_info->async_submit_wait);

                BUG_ON(atomic_read(&cur->bi_cnt) == 0);
                submit_bio(cur->bi_rw, cur);
                num_run++;
                if (bio_sync(cur))
                        num_sync_run++;

                if (need_resched()) {
                        if (num_sync_run) {
                                blk_run_backing_dev(bdi, NULL);
                                num_sync_run = 0;
                        }
                        cond_resched();
                }

                /*
                 * we made progress, there is more work to do and the bdi
                 * is now congested.  Back off and let other work structs
                 * run instead
                 */
                if (pending && bdi_write_congested(bdi) && num_run > 16 &&
                    fs_info->fs_devices->open_devices > 1) {
                        struct io_context *ioc;

                        ioc = current->io_context;

                        /*
                         * the main goal here is that we don't want to
                         * block if we're going to be able to submit
                         * more requests without blocking.
                         *
                         * This code does two great things, it pokes into
                         * the elevator code from a filesystem _and_
                         * it makes assumptions about how batching works.
                         */
                        if (ioc && ioc->nr_batch_requests > 0 &&
                            time_before(jiffies, ioc->last_waited + HZ/50UL) &&
                            (last_waited == 0 ||
                             ioc->last_waited == last_waited)) {
                                /*
                                 * we want to go through our batch of
                                 * requests and stop.  So, we copy out
                                 * the ioc->last_waited time and test
                                 * against it before looping
                                 */
                                last_waited = ioc->last_waited;
                                if (need_resched()) {
                                        if (num_sync_run) {
                                                blk_run_backing_dev(bdi, NULL);
                                                num_sync_run = 0;
                                        }
                                        cond_resched();
                                }
                                continue;
                        }
                        spin_lock(&device->io_lock);
                        requeue_list(pending_bios, pending, tail);
                        device->running_pending = 1;

                        spin_unlock(&device->io_lock);
                        btrfs_requeue_work(&device->work);
                        goto done;
                }
        }

        if (num_sync_run) {
                num_sync_run = 0;
                blk_run_backing_dev(bdi, NULL);
        }

        cond_resched();
        if (again)
                goto loop;

        spin_lock(&device->io_lock);
        if (device->pending_bios.head || device->pending_sync_bios.head)
                goto loop_lock;
        spin_unlock(&device->io_lock);

        /*
         * IO has already been through a long path to get here.  Checksumming,
         * async helper threads, perhaps compression.  We've done a pretty
         * good job of collecting a batch of IO and should just unplug
         * the device right away.
         *
         * This will help anyone who is waiting on the IO, they might have
         * already unplugged, but managed to do so before the bio they
         * cared about found its way down here.
         */
        blk_run_backing_dev(bdi, NULL);
done:
        return 0;
}
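
/*
 * run_scheduled_bios() is the consumer half of the async submission
 * scheme: the submission paths queue bios on device->pending_bios or
 * device->pending_sync_bios and use device->running_pending to record
 * that a worker is already (or about to be) draining the lists; the
 * worker above drains them in batches and requeues its own work struct
 * whenever the backing device congests, so one worker thread can keep
 * several devices making progress.
 */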
static void pending_bios_fn(struct btrfs_work *work)
{
        struct btrfs_device *device;

        device = container_of(work, struct btrfs_device, work);
        run_scheduled_bios(device);
}
static noinline int device_list_add(const char *path,
                           struct btrfs_super_block *disk_super,
                           u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices;
        u64 found_transid = btrfs_super_generation(disk_super);

        fs_devices = find_fsid(disk_super->fsid);
        if (!fs_devices) {
                fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
                if (!fs_devices)
                        return -ENOMEM;
                INIT_LIST_HEAD(&fs_devices->devices);
                INIT_LIST_HEAD(&fs_devices->alloc_list);
                list_add(&fs_devices->list, &fs_uuids);
                memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
                device = NULL;
        } else {
                device = __find_device(&fs_devices->devices, devid,
                                       disk_super->dev_item.uuid);
        }
        if (!device) {
                if (fs_devices->opened)
                        return -EBUSY;

                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device) {
                        /* we can safely leave the fs_devices entry around */
                        return -ENOMEM;
                }
                device->devid = devid;
                device->work.func = pending_bios_fn;
                memcpy(device->uuid, disk_super->dev_item.uuid,
                       BTRFS_UUID_SIZE);
                device->barriers = 1;
                spin_lock_init(&device->io_lock);
                device->name = kstrdup(path, GFP_NOFS);
                if (!device->name) {
                        kfree(device);
                        return -ENOMEM;
                }
                INIT_LIST_HEAD(&device->dev_alloc_list);
                list_add(&device->dev_list, &fs_devices->devices);
                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        }

        if (found_transid > fs_devices->latest_trans) {
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
        }
        *fs_devices_ret = fs_devices;
        return 0;
}
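
/*
 * In short: the first scan of a device with an unknown fsid creates a
 * new btrfs_fs_devices and adds it to fs_uuids; rescanning a known
 * device just refreshes latest_devid/latest_trans from the superblock
 * generation; and a new device cannot join a filesystem whose devices
 * are already open (-EBUSY).
 */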
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
        struct btrfs_fs_devices *fs_devices;
        struct btrfs_device *device;
        struct btrfs_device *orig_dev;

        fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
        if (!fs_devices)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&fs_devices->devices);
        INIT_LIST_HEAD(&fs_devices->alloc_list);
        INIT_LIST_HEAD(&fs_devices->list);
        fs_devices->latest_devid = orig->latest_devid;
        fs_devices->latest_trans = orig->latest_trans;
        memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

        list_for_each_entry(orig_dev, &orig->devices, dev_list) {
                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device)
                        goto error;

                device->name = kstrdup(orig_dev->name, GFP_NOFS);
                if (!device->name) {
                        kfree(device);
                        goto error;
                }

                device->devid = orig_dev->devid;
                device->work.func = pending_bios_fn;
                memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
                device->barriers = 1;
                spin_lock_init(&device->io_lock);
                INIT_LIST_HEAD(&device->dev_list);
                INIT_LIST_HEAD(&device->dev_alloc_list);

                list_add(&device->dev_list, &fs_devices->devices);
                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        }
        return fs_devices;
error:
        free_fs_devices(fs_devices);
        return ERR_PTR(-ENOMEM);
}
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device, *next;

        mutex_lock(&uuid_mutex);
again:
        list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
                if (device->in_fs_metadata)
                        continue;

                if (device->bdev) {
                        close_bdev_exclusive(device->bdev, device->mode);
                        device->bdev = NULL;
                        fs_devices->open_devices--;
                }
                if (device->writeable) {
                        list_del_init(&device->dev_alloc_list);
                        device->writeable = 0;
                        fs_devices->rw_devices--;
                }
                list_del_init(&device->dev_list);
                fs_devices->num_devices--;
                kfree(device->name);
                kfree(device);
        }

        if (fs_devices->seed) {
                fs_devices = fs_devices->seed;
                goto again;
        }

        mutex_unlock(&uuid_mutex);
        return 0;
}
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device;

        if (--fs_devices->opened > 0)
                return 0;

        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                if (device->bdev) {
                        close_bdev_exclusive(device->bdev, device->mode);
                        fs_devices->open_devices--;
                }
                if (device->writeable) {
                        list_del_init(&device->dev_alloc_list);
                        fs_devices->rw_devices--;
                }

                device->bdev = NULL;
                device->writeable = 0;
                device->in_fs_metadata = 0;
        }
        WARN_ON(fs_devices->open_devices);
        WARN_ON(fs_devices->rw_devices);
        fs_devices->opened = 0;
        fs_devices->seeding = 0;

        return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_fs_devices *seed_devices = NULL;
        int ret;

        mutex_lock(&uuid_mutex);
        ret = __btrfs_close_devices(fs_devices);
        if (!fs_devices->opened) {
                seed_devices = fs_devices->seed;
                fs_devices->seed = NULL;
        }
        mutex_unlock(&uuid_mutex);

        while (seed_devices) {
                fs_devices = seed_devices;
                seed_devices = fs_devices->seed;
                __btrfs_close_devices(fs_devices);
                free_fs_devices(fs_devices);
        }
        return ret;
}
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                                fmode_t flags, void *holder)
{
        struct block_device *bdev;
        struct list_head *head = &fs_devices->devices;
        struct btrfs_device *device;
        struct block_device *latest_bdev = NULL;
        struct buffer_head *bh;
        struct btrfs_super_block *disk_super;
        u64 latest_devid = 0;
        u64 latest_transid = 0;
        u64 devid;
        int seeding = 1;
        int ret = 0;

        list_for_each_entry(device, head, dev_list) {
                if (device->bdev)
                        continue;
                if (!device->name)
                        continue;

                bdev = open_bdev_exclusive(device->name, flags, holder);
                if (IS_ERR(bdev)) {
                        printk(KERN_INFO "open %s failed\n", device->name);
                        goto error;
                }
                set_blocksize(bdev, 4096);

                bh = btrfs_read_dev_super(bdev);
                if (!bh)
                        goto error_close;

                disk_super = (struct btrfs_super_block *)bh->b_data;
                devid = le64_to_cpu(disk_super->dev_item.devid);
                if (devid != device->devid)
                        goto error_brelse;

                if (memcmp(device->uuid, disk_super->dev_item.uuid,
                           BTRFS_UUID_SIZE))
                        goto error_brelse;

                device->generation = btrfs_super_generation(disk_super);
                if (!latest_transid || device->generation > latest_transid) {
                        latest_devid = devid;
                        latest_transid = device->generation;
                        latest_bdev = bdev;
                }

                if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
                        device->writeable = 0;
                } else {
                        device->writeable = !bdev_read_only(bdev);
                        seeding = 0;
                }

                device->bdev = bdev;
                device->in_fs_metadata = 0;
                device->mode = flags;

                fs_devices->open_devices++;
                if (device->writeable) {
                        fs_devices->rw_devices++;
                        list_add(&device->dev_alloc_list,
                                 &fs_devices->alloc_list);
                }
                continue;

error_brelse:
                brelse(bh);
error_close:
                close_bdev_exclusive(bdev, FMODE_READ);
error:
                continue;
        }
        if (fs_devices->open_devices == 0) {
                ret = -EIO;
                goto out;
        }
        fs_devices->seeding = seeding;
        fs_devices->opened = 1;
        fs_devices->latest_bdev = latest_bdev;
        fs_devices->latest_devid = latest_devid;
        fs_devices->latest_trans = latest_transid;
        fs_devices->total_rw_bytes = 0;
out:
        return ret;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                       fmode_t flags, void *holder)
{
        int ret;

        mutex_lock(&uuid_mutex);
        if (fs_devices->opened) {
                fs_devices->opened++;
                ret = 0;
        } else {
                ret = __btrfs_open_devices(fs_devices, flags, holder);
        }
        mutex_unlock(&uuid_mutex);
        return ret;
}
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
                          struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_super_block *disk_super;
        struct block_device *bdev;
        struct buffer_head *bh;
        int ret;
        u64 devid;
        u64 transid;

        mutex_lock(&uuid_mutex);

        bdev = open_bdev_exclusive(path, flags, holder);
        if (IS_ERR(bdev)) {
                ret = PTR_ERR(bdev);
                goto error;
        }

        ret = set_blocksize(bdev, 4096);
        if (ret)
                goto error_close;
        bh = btrfs_read_dev_super(bdev);
        if (!bh) {
                ret = -EIO;
                goto error_close;
        }
        disk_super = (struct btrfs_super_block *)bh->b_data;
        devid = le64_to_cpu(disk_super->dev_item.devid);
        transid = btrfs_super_generation(disk_super);
        if (disk_super->label[0])
                printk(KERN_INFO "device label %s ", disk_super->label);
        else {
                /* FIXME, make a real uuid parser */
                printk(KERN_INFO "device fsid %llx-%llx ",
                       *(unsigned long long *)disk_super->fsid,
                       *(unsigned long long *)(disk_super->fsid + 8));
        }
        printk(KERN_CONT "devid %llu transid %llu %s\n",
               (unsigned long long)devid, (unsigned long long)transid, path);
        ret = device_list_add(path, disk_super, devid, fs_devices_ret);

        brelse(bh);
error_close:
        close_bdev_exclusive(bdev, flags);
error:
        mutex_unlock(&uuid_mutex);
        return ret;
}
/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
                                         struct btrfs_device *device,
                                         u64 num_bytes, u64 *start)
{
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent = NULL;
        struct btrfs_path *path;
        u64 hole_size = 0;
        u64 last_byte = 0;
        u64 search_start = 0;
        u64 search_end = device->total_bytes;
        int ret;
        int slot = 0;
        int start_found;
        struct extent_buffer *l;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->reada = 2;
        start_found = 0;

        /* FIXME use last free of some kind */

        /* we don't want to overwrite the superblock on the drive,
         * so we make sure to start at an offset of at least 1MB
         */
        search_start = max((u64)1024 * 1024, search_start);

        if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
                search_start = max(root->fs_info->alloc_start, search_start);

        key.objectid = device->devid;
        key.offset = search_start;
        key.type = BTRFS_DEV_EXTENT_KEY;
        ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;
        ret = btrfs_previous_item(root, path, 0, key.type);
        if (ret < 0)
                goto error;
        l = path->nodes[0];
        btrfs_item_key_to_cpu(l, &key, path->slots[0]);
        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;
no_more_items:
                        if (!start_found) {
                                if (search_start >= search_end) {
                                        ret = -ENOSPC;
                                        goto error;
                                }
                                *start = search_start;
                                start_found = 1;
                                goto check_pending;
                        }
                        *start = last_byte > search_start ?
                                last_byte : search_start;
                        if (search_end <= *start) {
                                ret = -ENOSPC;
                                goto error;
                        }
                        goto check_pending;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                if (key.objectid < device->devid)
                        goto next;

                if (key.objectid > device->devid)
                        goto no_more_items;

                if (key.offset >= search_start && key.offset > last_byte &&
                    start_found) {
                        if (last_byte < search_start)
                                last_byte = search_start;
                        hole_size = key.offset - last_byte;
                        if (key.offset > last_byte &&
                            hole_size >= num_bytes) {
                                *start = last_byte;
                                goto check_pending;
                        }
                }
                if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
                        goto next;

                start_found = 1;
                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
                path->slots[0]++;
                cond_resched();
        }
check_pending:
        /* we have to make sure we didn't find an extent that has already
         * been allocated by the map tree or the original allocation
         */
        BUG_ON(*start < search_start);

        if (*start + num_bytes > search_end) {
                ret = -ENOSPC;
                goto error;
        }
        /* check for pending inserts here */
        ret = 0;

error:
        btrfs_free_path(path);
        return ret;
}
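
/*
 * Worked example: with dev extents at [1MB, 5MB) and [9MB, 12MB), a
 * search for num_bytes = 4MB walks the items, tracks last_byte = 5MB
 * after the first extent, sees the next key at offset 9MB, computes
 * hole_size = 4MB, and returns *start = 5MB.
 */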
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
                                 struct btrfs_device *device,
                                 u64 start)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct extent_buffer *leaf = NULL;
        struct btrfs_dev_extent *extent = NULL;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid,
                                          BTRFS_DEV_EXTENT_KEY);
                BUG_ON(ret);
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
                BUG_ON(found_key.offset > start || found_key.offset +
                       btrfs_dev_extent_length(leaf, extent) < start);
                ret = 0;
        } else if (ret == 0) {
                leaf = path->nodes[0];
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
        }
        BUG_ON(ret);

        if (device->bytes_used > 0)
                device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
        ret = btrfs_del_item(trans, root, path);
        BUG_ON(ret);

        btrfs_free_path(path);
        return ret;
}
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
                           struct btrfs_device *device,
                           u64 chunk_tree, u64 chunk_objectid,
                           u64 chunk_offset, u64 start, u64 num_bytes)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *extent;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        WARN_ON(!device->in_fs_metadata);
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*extent));
        BUG_ON(ret);

        leaf = path->nodes[0];
        extent = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_dev_extent);
        btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
        btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
        btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

        write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
                    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
                    BTRFS_UUID_SIZE);

        btrfs_set_dev_extent_length(leaf, extent, num_bytes);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_free_path(path);
        return ret;
}
static noinline int find_next_chunk(struct btrfs_root *root,
                                    u64 objectid, u64 *offset)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_key key;
        struct btrfs_chunk *chunk;
        struct btrfs_key found_key;

        path = btrfs_alloc_path();
        BUG_ON(!path);

        key.objectid = objectid;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;

        BUG_ON(ret == 0);

        ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
        if (ret) {
                *offset = 0;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                if (found_key.objectid != objectid)
                        *offset = 0;
                else {
                        chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                               struct btrfs_chunk);
                        *offset = found_key.offset +
                                btrfs_chunk_length(path->nodes[0], chunk);
                }
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_path *path;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = (u64)-1;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;

        BUG_ON(ret == 0);

        ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
                                  BTRFS_DEV_ITEM_KEY);
        if (ret) {
                *objectid = 1;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                *objectid = found_key.offset + 1;
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
                     struct btrfs_root *root,
                     struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_dev_item *dev_item;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        unsigned long ptr;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;

        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*dev_item));
        if (ret)
                goto out;

        leaf = path->nodes[0];
        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

        btrfs_set_device_id(leaf, dev_item, device->devid);
        btrfs_set_device_generation(leaf, dev_item, 0);
        btrfs_set_device_type(leaf, dev_item, device->type);
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
        btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_set_device_group(leaf, dev_item, 0);
        btrfs_set_device_seek_speed(leaf, dev_item, 0);
        btrfs_set_device_bandwidth(leaf, dev_item, 0);
        btrfs_set_device_start_offset(leaf, dev_item, 0);

        ptr = (unsigned long)btrfs_device_uuid(dev_item);
        write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
        ptr = (unsigned long)btrfs_device_fsid(dev_item);
        write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
        btrfs_mark_buffer_dirty(leaf);

        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}
static int btrfs_rm_dev_item(struct btrfs_root *root,
                             struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_trans_handle *trans;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        trans = btrfs_start_transaction(root, 1);
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;
        lock_chunks(root);

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, root, path);
        if (ret)
                goto out;
out:
        btrfs_free_path(path);
        unlock_chunks(root);
        btrfs_commit_transaction(trans, root);
        return ret;
}
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
        struct btrfs_device *device;
        struct btrfs_device *next_device;
        struct block_device *bdev;
        struct buffer_head *bh = NULL;
        struct btrfs_super_block *disk_super;
        u64 all_avail;
        u64 devid;
        u64 num_devices;
        u8 *dev_uuid;
        int ret = 0;

        mutex_lock(&uuid_mutex);
        mutex_lock(&root->fs_info->volume_mutex);

        all_avail = root->fs_info->avail_data_alloc_bits |
                root->fs_info->avail_system_alloc_bits |
                root->fs_info->avail_metadata_alloc_bits;

        if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
            root->fs_info->fs_devices->rw_devices <= 4) {
                printk(KERN_ERR "btrfs: unable to go below four devices "
                       "on raid10\n");
                ret = -EINVAL;
                goto out;
        }

        if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
            root->fs_info->fs_devices->rw_devices <= 2) {
                printk(KERN_ERR "btrfs: unable to go below two "
                       "devices on raid1\n");
                ret = -EINVAL;
                goto out;
        }

        if (strcmp(device_path, "missing") == 0) {
                struct list_head *devices;
                struct btrfs_device *tmp;

                device = NULL;
                devices = &root->fs_info->fs_devices->devices;
                list_for_each_entry(tmp, devices, dev_list) {
                        if (tmp->in_fs_metadata && !tmp->bdev) {
                                device = tmp;
                                break;
                        }
                }
                bdev = NULL;
                bh = NULL;
                disk_super = NULL;
                if (!device) {
                        printk(KERN_ERR "btrfs: no missing devices found to "
                               "remove\n");
                        ret = -ENOENT;
                        goto out;
                }
        } else {
                bdev = open_bdev_exclusive(device_path, FMODE_READ,
                                           root->fs_info->bdev_holder);
                if (IS_ERR(bdev)) {
                        ret = PTR_ERR(bdev);
                        goto out;
                }

                set_blocksize(bdev, 4096);
                bh = btrfs_read_dev_super(bdev);
                if (!bh) {
                        ret = -EIO;
                        goto error_close;
                }
                disk_super = (struct btrfs_super_block *)bh->b_data;
                devid = le64_to_cpu(disk_super->dev_item.devid);
                dev_uuid = disk_super->dev_item.uuid;
                device = btrfs_find_device(root, devid, dev_uuid,
                                           disk_super->fsid);
                if (!device) {
                        ret = -ENOENT;
                        goto error_brelse;
                }
        }

        if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
                printk(KERN_ERR "btrfs: unable to remove the only writeable "
                       "device\n");
                ret = -EINVAL;
                goto error_brelse;
        }

        if (device->writeable) {
                list_del_init(&device->dev_alloc_list);
                root->fs_info->fs_devices->rw_devices--;
        }

        ret = btrfs_shrink_device(device, 0);
        if (ret)
                goto error_brelse;

        ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
        if (ret)
                goto error_brelse;

        device->in_fs_metadata = 0;
        list_del_init(&device->dev_list);
        device->fs_devices->num_devices--;

        next_device = list_entry(root->fs_info->fs_devices->devices.next,
                                 struct btrfs_device, dev_list);
        if (device->bdev == root->fs_info->sb->s_bdev)
                root->fs_info->sb->s_bdev = next_device->bdev;
        if (device->bdev == root->fs_info->fs_devices->latest_bdev)
                root->fs_info->fs_devices->latest_bdev = next_device->bdev;

        if (device->bdev) {
                close_bdev_exclusive(device->bdev, device->mode);
                device->bdev = NULL;
                device->fs_devices->open_devices--;
        }

        num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
        btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);

        if (device->fs_devices->open_devices == 0) {
                struct btrfs_fs_devices *fs_devices;
                fs_devices = root->fs_info->fs_devices;
                while (fs_devices) {
                        if (fs_devices->seed == device->fs_devices)
                                break;
                        fs_devices = fs_devices->seed;
                }
                fs_devices->seed = device->fs_devices->seed;
                device->fs_devices->seed = NULL;
                __btrfs_close_devices(device->fs_devices);
                free_fs_devices(device->fs_devices);
        }

        /*
         * at this point, the device is zero sized.  We want to
         * remove it from the devices list and zero out the old super
         */
        if (device->writeable) {
                /* make sure this device isn't detected as part of
                 * the FS anymore
                 */
                memset(&disk_super->magic, 0, sizeof(disk_super->magic));
                set_buffer_dirty(bh);
                sync_dirty_buffer(bh);
        }

        kfree(device->name);
        kfree(device);
        ret = 0;

error_brelse:
        brelse(bh);
error_close:
        if (bdev)
                close_bdev_exclusive(bdev, FMODE_READ);
out:
        mutex_unlock(&root->fs_info->volume_mutex);
        mutex_unlock(&uuid_mutex);
        return ret;
}
/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root)
{
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
        struct btrfs_fs_devices *old_devices;
        struct btrfs_fs_devices *seed_devices;
        struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
        struct btrfs_device *device;
        u64 super_flags;

        BUG_ON(!mutex_is_locked(&uuid_mutex));
        if (!fs_devices->seeding)
                return -EINVAL;

        seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
        if (!seed_devices)
                return -ENOMEM;

        old_devices = clone_fs_devices(fs_devices);
        if (IS_ERR(old_devices)) {
                kfree(seed_devices);
                return PTR_ERR(old_devices);
        }

        list_add(&old_devices->list, &fs_uuids);

        memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
        seed_devices->opened = 1;
        INIT_LIST_HEAD(&seed_devices->devices);
        INIT_LIST_HEAD(&seed_devices->alloc_list);
        list_splice_init(&fs_devices->devices, &seed_devices->devices);
        list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
        list_for_each_entry(device, &seed_devices->devices, dev_list) {
                device->fs_devices = seed_devices;
        }

        fs_devices->seeding = 0;
        fs_devices->num_devices = 0;
        fs_devices->open_devices = 0;
        fs_devices->seed = seed_devices;

        generate_random_uuid(fs_devices->fsid);
        memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
        memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
        super_flags = btrfs_super_flags(disk_super) &
                      ~BTRFS_SUPER_FLAG_SEEDING;
        btrfs_set_super_flags(disk_super, super_flags);

        return 0;
}
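
/*
 * After a sprout, the original (seed) devices live on a private
 * btrfs_fs_devices hung off fs_devices->seed, still carrying the old
 * fsid, while the mounted fs_devices gets a freshly generated fsid
 * that is also copied into fs_info and the in-memory super block, and
 * the SEEDING flag is cleared so the new filesystem is writeable.
 */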
/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_dev_item *dev_item;
        struct btrfs_device *device;
        struct btrfs_key key;
        u8 fs_uuid[BTRFS_UUID_SIZE];
        u8 dev_uuid[BTRFS_UUID_SIZE];
        u64 devid;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        root = root->fs_info->chunk_root;
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.offset = 0;
        key.type = BTRFS_DEV_ITEM_KEY;

        while (1) {
                ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
                if (ret < 0)
                        goto error;

                leaf = path->nodes[0];
next_slot:
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret > 0)
                                break;
                        if (ret < 0)
                                goto error;
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                        btrfs_release_path(root, path);
                        continue;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
                    key.type != BTRFS_DEV_ITEM_KEY)
                        break;

                dev_item = btrfs_item_ptr(leaf, path->slots[0],
                                          struct btrfs_dev_item);
                devid = btrfs_device_id(leaf, dev_item);
                read_extent_buffer(leaf, dev_uuid,
                                   (unsigned long)btrfs_device_uuid(dev_item),
                                   BTRFS_UUID_SIZE);
                read_extent_buffer(leaf, fs_uuid,
                                   (unsigned long)btrfs_device_fsid(dev_item),
                                   BTRFS_UUID_SIZE);
                device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
                BUG_ON(!device);

                if (device->fs_devices->seeding) {
                        btrfs_set_device_generation(leaf, dev_item,
                                                    device->generation);
                        btrfs_mark_buffer_dirty(leaf);
                }

                path->slots[0]++;
                goto next_slot;
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_device *device;
        struct block_device *bdev;
        struct list_head *devices;
        struct super_block *sb = root->fs_info->sb;
        u64 total_bytes;
        int seeding_dev = 0;
        int ret = 0;

        if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
                return -EINVAL;

        bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        if (root->fs_info->fs_devices->seeding) {
                seeding_dev = 1;
                down_write(&sb->s_umount);
                mutex_lock(&uuid_mutex);
        }

        filemap_write_and_wait(bdev->bd_inode->i_mapping);
        mutex_lock(&root->fs_info->volume_mutex);

        devices = &root->fs_info->fs_devices->devices;
        list_for_each_entry(device, devices, dev_list) {
                if (device->bdev == bdev) {
                        ret = -EEXIST;
                        goto error;
                }
        }

        device = kzalloc(sizeof(*device), GFP_NOFS);
        if (!device) {
                /* we can safely leave the fs_devices entry around */
                ret = -ENOMEM;
                goto error;
        }

        device->name = kstrdup(device_path, GFP_NOFS);
        if (!device->name) {
                kfree(device);
                ret = -ENOMEM;
                goto error;
        }

        ret = find_next_devid(root, &device->devid);
        if (ret) {
                kfree(device->name);
                kfree(device);
                goto error;
        }

        trans = btrfs_start_transaction(root, 1);
        lock_chunks(root);

        device->barriers = 1;
        device->writeable = 1;
        device->work.func = pending_bios_fn;
        generate_random_uuid(device->uuid);
        spin_lock_init(&device->io_lock);
        device->generation = trans->transid;
        device->io_width = root->sectorsize;
        device->io_align = root->sectorsize;
        device->sector_size = root->sectorsize;
        device->total_bytes = i_size_read(bdev->bd_inode);
        device->disk_total_bytes = device->total_bytes;
        device->dev_root = root->fs_info->dev_root;
        device->bdev = bdev;
        device->in_fs_metadata = 1;
        set_blocksize(device->bdev, 4096);

        if (seeding_dev) {
                sb->s_flags &= ~MS_RDONLY;
                ret = btrfs_prepare_sprout(trans, root);
                BUG_ON(ret);
        }

        device->fs_devices = root->fs_info->fs_devices;
        list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
        list_add(&device->dev_alloc_list,
                 &root->fs_info->fs_devices->alloc_list);
        root->fs_info->fs_devices->num_devices++;
        root->fs_info->fs_devices->open_devices++;
        root->fs_info->fs_devices->rw_devices++;
        root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

        total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
        btrfs_set_super_total_bytes(&root->fs_info->super_copy,
                                    total_bytes + device->total_bytes);

        total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
        btrfs_set_super_num_devices(&root->fs_info->super_copy,
                                    total_bytes + 1);

        if (seeding_dev) {
                ret = init_first_rw_device(trans, root, device);
                BUG_ON(ret);
                ret = btrfs_finish_sprout(trans, root);
                BUG_ON(ret);
        } else {
                ret = btrfs_add_device(trans, root, device);
        }

        /*
         * we've got more storage, clear any full flags on the space
         * infos
         */
        btrfs_clear_space_info_full(root->fs_info);

        unlock_chunks(root);
        btrfs_commit_transaction(trans, root);

        if (seeding_dev) {
                mutex_unlock(&uuid_mutex);
                up_write(&sb->s_umount);

                ret = btrfs_relocate_sys_chunks(root);
                BUG_ON(ret);
        }
out:
        mutex_unlock(&root->fs_info->volume_mutex);
        return ret;
error:
        close_bdev_exclusive(bdev, 0);
        if (seeding_dev) {
                mutex_unlock(&uuid_mutex);
                up_write(&sb->s_umount);
        }
        goto out;
}
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
                                        struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root;
        struct btrfs_dev_item *dev_item;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        root = device->dev_root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        leaf = path->nodes[0];
        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

        btrfs_set_device_id(leaf, dev_item, device->devid);
        btrfs_set_device_type(leaf, dev_item, device->type);
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
        btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_mark_buffer_dirty(leaf);

out:
        btrfs_free_path(path);
        return ret;
}
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
                               struct btrfs_device *device, u64 new_size)
{
        struct btrfs_super_block *super_copy =
                &device->dev_root->fs_info->super_copy;
        u64 old_total = btrfs_super_total_bytes(super_copy);
        u64 diff = new_size - device->total_bytes;

        if (!device->writeable)
                return -EACCES;
        if (new_size <= device->total_bytes)
                return -EINVAL;

        btrfs_set_super_total_bytes(super_copy, old_total + diff);
        device->fs_devices->total_rw_bytes += diff;

        device->total_bytes = new_size;
        btrfs_clear_space_info_full(device->dev_root->fs_info);

        return btrfs_update_device(trans, device);
}
int btrfs_grow_device(struct btrfs_trans_handle *trans,
                      struct btrfs_device *device, u64 new_size)
{
        int ret;

        lock_chunks(device->dev_root);
        ret = __btrfs_grow_device(trans, device, new_size);
        unlock_chunks(device->dev_root);
        return ret;
}
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root,
                            u64 chunk_tree, u64 chunk_objectid,
                            u64 chunk_offset)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;

        root = root->fs_info->chunk_root;
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = chunk_objectid;
        key.offset = chunk_offset;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        BUG_ON(ret);

        ret = btrfs_del_item(trans, root, path);
        BUG_ON(ret);

        btrfs_free_path(path);
        return 0;
}
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
                        chunk_offset)
{
        struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
        struct btrfs_disk_key *disk_key;
        struct btrfs_chunk *chunk;
        u8 *ptr;
        int ret = 0;
        u32 num_stripes;
        u32 array_size;
        u32 len = 0;
        u32 cur;
        struct btrfs_key key;

        array_size = btrfs_super_sys_array_size(super_copy);

        ptr = super_copy->sys_chunk_array;
        cur = 0;

        while (cur < array_size) {
                disk_key = (struct btrfs_disk_key *)ptr;
                btrfs_disk_key_to_cpu(&key, disk_key);

                len = sizeof(*disk_key);

                if (key.type == BTRFS_CHUNK_ITEM_KEY) {
                        chunk = (struct btrfs_chunk *)(ptr + len);
                        num_stripes = btrfs_stack_chunk_num_stripes(chunk);
                        len += btrfs_chunk_item_size(num_stripes);
                } else {
                        ret = -EIO;
                        break;
                }
                if (key.objectid == chunk_objectid &&
                    key.offset == chunk_offset) {
                        memmove(ptr, ptr + len, array_size - (cur + len));
                        array_size -= len;
                        btrfs_set_super_sys_array_size(super_copy, array_size);
                } else {
                        ptr += len;
                        cur += len;
                }
        }
        return ret;
}
static int btrfs_relocate_chunk(struct btrfs_root *root,
                                u64 chunk_tree, u64 chunk_objectid,
                                u64 chunk_offset)
{
        struct extent_map_tree *em_tree;
        struct btrfs_root *extent_root;
        struct btrfs_trans_handle *trans;
        struct extent_map *em;
        struct map_lookup *map;
        int ret;
        int i;

        root = root->fs_info->chunk_root;
        extent_root = root->fs_info->extent_root;
        em_tree = &root->fs_info->mapping_tree.map_tree;

        /* step one, relocate all the extents inside this chunk */
        ret = btrfs_relocate_block_group(extent_root, chunk_offset);
        BUG_ON(ret);

        trans = btrfs_start_transaction(root, 1);
        BUG_ON(!trans);

        lock_chunks(root);

        /*
         * step two, delete the device extents and the
         * chunk tree entries
         */
        spin_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, chunk_offset, 1);
        spin_unlock(&em_tree->lock);

        BUG_ON(em->start > chunk_offset ||
               em->start + em->len < chunk_offset);
        map = (struct map_lookup *)em->bdev;

        for (i = 0; i < map->num_stripes; i++) {
                ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
                                            map->stripes[i].physical);
                BUG_ON(ret);

                if (map->stripes[i].dev) {
                        ret = btrfs_update_device(trans, map->stripes[i].dev);
                        BUG_ON(ret);
                }
        }
        ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
                               chunk_offset);
        BUG_ON(ret);

        if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
                ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
                BUG_ON(ret);
        }

        ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
        BUG_ON(ret);

        spin_lock(&em_tree->lock);
        remove_extent_mapping(em_tree, em);
        spin_unlock(&em_tree->lock);

        kfree(map);
        em->bdev = NULL;

        /* once for the tree */
        free_extent_map(em);
        /* once for us */
        free_extent_map(em);

        unlock_chunks(root);
        btrfs_end_transaction(trans, root);
        return 0;
}
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
        struct btrfs_root *chunk_root = root->fs_info->chunk_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_chunk *chunk;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u64 chunk_tree = chunk_root->root_key.objectid;
        u64 chunk_type;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        while (1) {
                ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
                if (ret < 0)
                        goto error;
                BUG_ON(ret == 0);

                ret = btrfs_previous_item(chunk_root, path, key.objectid,
                                          key.type);
                if (ret < 0)
                        goto error;
                if (ret > 0)
                        break;

                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

                chunk = btrfs_item_ptr(leaf, path->slots[0],
                                       struct btrfs_chunk);
                chunk_type = btrfs_chunk_type(leaf, chunk);
                btrfs_release_path(chunk_root, path);

                if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
                        ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
                                                   found_key.objectid,
                                                   found_key.offset);
                        BUG_ON(ret);
                }

                if (found_key.offset == 0)
                        break;
                key.offset = found_key.offset - 1;
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}
static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}
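
/*
 * div_factor(num, f) is roughly num * f / 10, so div_factor(x, 1)
 * yields about 10% of x: e.g. div_factor(100GB, 1) is 10GB (do_div
 * truncates).  The factor == 10 case short-circuits to num itself.
 */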
int btrfs_balance(struct btrfs_root *dev_root)
{
        int ret;
        struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
        struct btrfs_device *device;
        u64 old_size;
        u64 size_to_free;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_chunk *chunk;
        struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
        struct btrfs_trans_handle *trans;
        struct btrfs_key found_key;

        if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
                return -EROFS;

        mutex_lock(&dev_root->fs_info->volume_mutex);
        dev_root = dev_root->fs_info->dev_root;

        /* step one make some room on all the devices */
        list_for_each_entry(device, devices, dev_list) {
                old_size = device->total_bytes;
                size_to_free = div_factor(old_size, 1);
                size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
                if (!device->writeable ||
                    device->total_bytes - device->bytes_used > size_to_free)
                        continue;

                ret = btrfs_shrink_device(device, old_size - size_to_free);
                BUG_ON(ret);

                trans = btrfs_start_transaction(dev_root, 1);
                BUG_ON(!trans);

                ret = btrfs_grow_device(trans, device, old_size);
                BUG_ON(ret);

                btrfs_end_transaction(trans, dev_root);
        }

        /* step two, relocate all the chunks */
        path = btrfs_alloc_path();
        BUG_ON(!path);

        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        while (1) {
                ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
                if (ret < 0)
                        goto error;

                /*
                 * this shouldn't happen, it means the last relocate
                 * failed
                 */
                if (ret == 0)
                        break;

                ret = btrfs_previous_item(chunk_root, path, 0,
                                          BTRFS_CHUNK_ITEM_KEY);
                if (ret)
                        break;

                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                if (found_key.objectid != key.objectid)
                        break;

                chunk = btrfs_item_ptr(path->nodes[0],
                                       path->slots[0],
                                       struct btrfs_chunk);
                key.offset = found_key.offset;
                /* chunk zero is special */
                if (key.offset == 0)
                        break;

                btrfs_release_path(chunk_root, path);
                ret = btrfs_relocate_chunk(chunk_root,
                                           chunk_root->root_key.objectid,
                                           found_key.objectid,
                                           found_key.offset);
                BUG_ON(ret);
        }
        ret = 0;
error:
        btrfs_free_path(path);
        mutex_unlock(&dev_root->fs_info->volume_mutex);
        return ret;
}
/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent = NULL;
        struct btrfs_path *path;
        u64 length;
        u64 chunk_tree;
        u64 chunk_objectid;
        u64 chunk_offset;
        int ret;
        int slot;
        struct extent_buffer *l;
        struct btrfs_key key;
        struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
        u64 old_total = btrfs_super_total_bytes(super_copy);
        u64 diff = device->total_bytes - new_size;

        if (new_size >= device->total_bytes)
                return -EINVAL;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        trans = btrfs_start_transaction(root, 1);
        if (!trans) {
                ret = -ENOMEM;
                goto done;
        }

        path->reada = 2;

        lock_chunks(root);

        device->total_bytes = new_size;
        if (device->writeable)
                device->fs_devices->total_rw_bytes -= diff;
        unlock_chunks(root);
        btrfs_end_transaction(trans, root);

        key.objectid = device->devid;
        key.offset = (u64)-1;
        key.type = BTRFS_DEV_EXTENT_KEY;

        while (1) {
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                if (ret < 0)
                        goto done;

                ret = btrfs_previous_item(root, path, 0, key.type);
                if (ret < 0)
                        goto done;
                if (ret) {
                        ret = 0;
                        goto done;
                }

                l = path->nodes[0];
                slot = path->slots[0];
                btrfs_item_key_to_cpu(l, &key, path->slots[0]);

                if (key.objectid != device->devid)
                        goto done;

                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                length = btrfs_dev_extent_length(l, dev_extent);

                if (key.offset + length <= new_size)
                        break;

                chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
                chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
                chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
                btrfs_release_path(root, path);

                ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
                                           chunk_offset);
                if (ret)
                        goto done;
        }

        /* Shrinking succeeded, else we would be at "done". */
        trans = btrfs_start_transaction(root, 1);
        if (!trans) {
                ret = -ENOMEM;
                goto done;
        }
        lock_chunks(root);

        device->disk_total_bytes = new_size;
        /* Now btrfs_update_device() will change the on-disk size. */
        ret = btrfs_update_device(trans, device);
        if (ret) {
                unlock_chunks(root);
                btrfs_end_transaction(trans, root);
                goto done;
        }
        WARN_ON(diff > old_total);
        btrfs_set_super_total_bytes(super_copy, old_total - diff);
        unlock_chunks(root);
        btrfs_end_transaction(trans, root);
done:
        btrfs_free_path(path);
        return ret;
}
static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_key *key,
                           struct btrfs_chunk *chunk, int item_size)
{
        struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
        struct btrfs_disk_key disk_key;
        u32 array_size;
        u8 *ptr;

        array_size = btrfs_super_sys_array_size(super_copy);
        if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
                return -EFBIG;

        ptr = super_copy->sys_chunk_array + array_size;
        btrfs_cpu_key_to_disk(&disk_key, key);
        memcpy(ptr, &disk_key, sizeof(disk_key));
        ptr += sizeof(disk_key);
        memcpy(ptr, chunk, item_size);
        item_size += sizeof(disk_key);
        btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
        return 0;
}
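
/*
 * Layout of super_copy->sys_chunk_array after the append: each entry
 * is a packed (struct btrfs_disk_key, struct btrfs_chunk + stripes)
 * pair, so the array grows by sizeof(disk_key) + item_size bytes and
 * sys_array_size is bumped by the same amount.
 */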
static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
                                        int num_stripes, int sub_stripes)
{
        if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
                return calc_size;
        else if (type & BTRFS_BLOCK_GROUP_RAID10)
                return calc_size * (num_stripes / sub_stripes);
        else
                return calc_size * num_stripes;
}
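
/*
 * Example: with calc_size = 256MB per stripe, a 4-stripe RAID10 chunk
 * (sub_stripes = 2) provides 256MB * (4 / 2) = 512MB of usable bytes;
 * RAID1 or DUP provide calc_size (one copy's worth); RAID0 with 4
 * stripes provides 4 * 256MB = 1GB.
 */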
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                               struct btrfs_root *extent_root,
                               struct map_lookup **map_ret,
                               u64 *num_bytes, u64 *stripe_size,
                               u64 start, u64 type)
{
        struct btrfs_fs_info *info = extent_root->fs_info;
        struct btrfs_device *device = NULL;
        struct btrfs_fs_devices *fs_devices = info->fs_devices;
        struct list_head *cur;
        struct map_lookup *map = NULL;
        struct extent_map_tree *em_tree;
        struct extent_map *em;
        struct list_head private_devs;
        int min_stripe_size = 1 * 1024 * 1024;
        u64 calc_size = 1024 * 1024 * 1024;
        u64 max_chunk_size = calc_size;
        u64 min_free;
        u64 avail;
        u64 max_avail = 0;
        u64 dev_offset;
        int num_stripes = 1;
        int min_stripes = 1;
        int sub_stripes = 0;
        int looped = 0;
        int ret;
        int index;
        int stripe_len = 64 * 1024;

        if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
            (type & BTRFS_BLOCK_GROUP_DUP)) {
                WARN_ON(1);
                type &= ~BTRFS_BLOCK_GROUP_DUP;
        }
        if (list_empty(&fs_devices->alloc_list))
                return -ENOSPC;

        if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
                num_stripes = fs_devices->rw_devices;
                min_stripes = 2;
        }
        if (type & (BTRFS_BLOCK_GROUP_DUP)) {
                num_stripes = 2;
                min_stripes = 2;
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
                num_stripes = min_t(u64, 2, fs_devices->rw_devices);
                if (num_stripes < 2)
                        return -ENOSPC;
                min_stripes = 2;
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
                num_stripes = fs_devices->rw_devices;
                if (num_stripes < 4)
                        return -ENOSPC;
                num_stripes &= ~(u32)1;
                sub_stripes = 2;
                min_stripes = 4;
        }

        if (type & BTRFS_BLOCK_GROUP_DATA) {
                max_chunk_size = 10 * calc_size;
                min_stripe_size = 64 * 1024 * 1024;
        } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
                max_chunk_size = 4 * calc_size;
                min_stripe_size = 32 * 1024 * 1024;
        } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
                calc_size = 8 * 1024 * 1024;
                max_chunk_size = calc_size * 2;
                min_stripe_size = 1 * 1024 * 1024;
        }

        /* we don't want a chunk larger than 10% of writeable space */
        max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
                             max_chunk_size);

again:
        if (!map || map->num_stripes != num_stripes) {
                kfree(map);
                map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
                if (!map)
                        return -ENOMEM;
                map->num_stripes = num_stripes;
        }

        if (calc_size * num_stripes > max_chunk_size) {
                calc_size = max_chunk_size;
                do_div(calc_size, num_stripes);
                do_div(calc_size, stripe_len);
                calc_size *= stripe_len;
        }
        /* we don't want tiny stripes */
        calc_size = max_t(u64, min_stripe_size, calc_size);

        do_div(calc_size, stripe_len);
        calc_size *= stripe_len;

        cur = fs_devices->alloc_list.next;
        index = 0;

        if (type & BTRFS_BLOCK_GROUP_DUP)
                min_free = calc_size * 2;
        else
                min_free = calc_size;

        /*
         * we add 1MB because we never use the first 1MB of the device, unless
         * we've looped, then we are likely allocating the maximum amount of
         * space left already
         */
        if (!looped)
                min_free += 1024 * 1024;

        INIT_LIST_HEAD(&private_devs);
        while (index < num_stripes) {
                device = list_entry(cur, struct btrfs_device, dev_alloc_list);
                BUG_ON(!device->writeable);
                if (device->total_bytes > device->bytes_used)
                        avail = device->total_bytes - device->bytes_used;
                else
                        avail = 0;
                cur = cur->next;

                if (device->in_fs_metadata && avail >= min_free) {
                        ret = find_free_dev_extent(trans, device,
                                                   min_free, &dev_offset);
                        if (ret == 0) {
                                list_move_tail(&device->dev_alloc_list,
                                               &private_devs);
                                map->stripes[index].dev = device;
                                map->stripes[index].physical = dev_offset;
                                index++;
                                if (type & BTRFS_BLOCK_GROUP_DUP) {
                                        map->stripes[index].dev = device;
                                        map->stripes[index].physical =
                                                dev_offset + calc_size;
                                        index++;
                                }
                        }
                } else if (device->in_fs_metadata && avail > max_avail)
                        max_avail = avail;
                if (cur == &fs_devices->alloc_list)
                        break;
        }
        list_splice(&private_devs, &fs_devices->alloc_list);
        if (index < num_stripes) {
                if (index >= min_stripes) {
                        num_stripes = index;
                        if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
                                num_stripes /= sub_stripes;
                                num_stripes *= sub_stripes;
                        }
                        looped = 1;
                        goto again;
                }
                if (!looped && max_avail > 0) {
                        looped = 1;
                        calc_size = max_avail;
                        goto again;
                }
                kfree(map);
                return -ENOSPC;
        }
        map->sector_size = extent_root->sectorsize;
        map->stripe_len = stripe_len;
        map->io_align = stripe_len;
        map->io_width = stripe_len;
        map->type = type;
        map->num_stripes = num_stripes;
        map->sub_stripes = sub_stripes;

        *map_ret = map;
        *stripe_size = calc_size;
        *num_bytes = chunk_bytes_by_type(type, calc_size,
                                         num_stripes, sub_stripes);

        em = alloc_extent_map(GFP_NOFS);
        if (!em) {
                kfree(map);
                return -ENOMEM;
        }
        em->bdev = (struct block_device *)map;
        em->start = start;
        em->len = *num_bytes;
        em->block_start = 0;
        em->block_len = em->len;

        em_tree = &extent_root->fs_info->mapping_tree.map_tree;
        spin_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em);
        spin_unlock(&em_tree->lock);
        BUG_ON(ret);
        free_extent_map(em);

        ret = btrfs_make_block_group(trans, extent_root, 0, type,
                                     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
                                     start, *num_bytes);
        BUG_ON(ret);

        index = 0;
        while (index < map->num_stripes) {
                device = map->stripes[index].dev;
                dev_offset = map->stripes[index].physical;

                ret = btrfs_alloc_dev_extent(trans, device,
                                info->chunk_root->root_key.objectid,
                                BTRFS_FIRST_CHUNK_TREE_OBJECTID,
                                start, dev_offset, calc_size);
                BUG_ON(ret);
                index++;
        }

        return 0;
}
static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
                                struct btrfs_root *extent_root,
                                struct map_lookup *map, u64 chunk_offset,
                                u64 chunk_size, u64 stripe_size)
{
        u64 dev_offset;
        struct btrfs_key key;
        struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
        struct btrfs_device *device;
        struct btrfs_chunk *chunk;
        struct btrfs_stripe *stripe;
        size_t item_size = btrfs_chunk_item_size(map->num_stripes);
        int index = 0;
        int ret;

        chunk = kzalloc(item_size, GFP_NOFS);
        if (!chunk)
                return -ENOMEM;

        index = 0;
        while (index < map->num_stripes) {
                device = map->stripes[index].dev;
                device->bytes_used += stripe_size;
                ret = btrfs_update_device(trans, device);
                BUG_ON(ret);
                index++;
        }

        index = 0;
        stripe = &chunk->stripe;
        while (index < map->num_stripes) {
                device = map->stripes[index].dev;
                dev_offset = map->stripes[index].physical;

                btrfs_set_stack_stripe_devid(stripe, device->devid);
                btrfs_set_stack_stripe_offset(stripe, dev_offset);
                memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
                stripe++;
                index++;
        }

        btrfs_set_stack_chunk_length(chunk, chunk_size);
        btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
        btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
        btrfs_set_stack_chunk_type(chunk, map->type);
        btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
        btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
        btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
        btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
        btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.type = BTRFS_CHUNK_ITEM_KEY;
        key.offset = chunk_offset;

        ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
        BUG_ON(ret);

        if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
                ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
                                             item_size);
                BUG_ON(ret);
        }
        kfree(chunk);
        return 0;
}
/*
 * Chunk allocation falls into two parts. The first part does the work
 * that makes the newly allocated chunk usable, but does not do any
 * operation that modifies the chunk tree. The second part does the
 * work that requires modifying the chunk tree. This division is
 * important for the bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                      struct btrfs_root *extent_root, u64 type)
{
        u64 chunk_offset;
        u64 chunk_size;
        u64 stripe_size;
        struct map_lookup *map;
        struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
        int ret;

        ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
                              &chunk_offset);
        if (ret)
                return ret;

        ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
                                  &stripe_size, chunk_offset, type);
        if (ret)
                return ret;

        ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
                                   chunk_size, stripe_size);
        BUG_ON(ret);
        return 0;
}
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root,
                                         struct btrfs_device *device)
{
        u64 chunk_offset;
        u64 sys_chunk_offset;
        u64 chunk_size;
        u64 sys_chunk_size;
        u64 stripe_size;
        u64 sys_stripe_size;
        u64 alloc_profile;
        struct map_lookup *map;
        struct map_lookup *sys_map;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_root *extent_root = fs_info->extent_root;
        int ret;

        ret = find_next_chunk(fs_info->chunk_root,
                              BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
        BUG_ON(ret);

        alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
                        (fs_info->metadata_alloc_profile &
                         fs_info->avail_metadata_alloc_bits);
        alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

        ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
                                  &stripe_size, chunk_offset, alloc_profile);
        BUG_ON(ret);

        sys_chunk_offset = chunk_offset + chunk_size;

        alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
                        (fs_info->system_alloc_profile &
                         fs_info->avail_system_alloc_bits);
        alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

        ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
                                  &sys_chunk_size, &sys_stripe_size,
                                  sys_chunk_offset, alloc_profile);
        BUG_ON(ret);

        ret = btrfs_add_device(trans, fs_info->chunk_root, device);
        BUG_ON(ret);

        /*
         * Modifying the chunk tree needs allocating new blocks from both
         * the system block group and the metadata block group. So we can
         * only do operations that require modifying the chunk tree after
         * both block groups have been created.
         */
        ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
                                   chunk_size, stripe_size);
        BUG_ON(ret);

        ret = __finish_chunk_alloc(trans, extent_root, sys_map,
                                   sys_chunk_offset, sys_chunk_size,
                                   sys_stripe_size);
        BUG_ON(ret);
        return 0;
}
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
        struct extent_map *em;
        struct map_lookup *map;
        struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
        int readonly = 0;
        int i;

        spin_lock(&map_tree->map_tree.lock);
        em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
        spin_unlock(&map_tree->map_tree.lock);
        if (!em)
                return 1;

        map = (struct map_lookup *)em->bdev;
        for (i = 0; i < map->num_stripes; i++) {
                if (!map->stripes[i].dev->writeable) {
                        readonly = 1;
                        break;
                }
        }
        free_extent_map(em);
        return readonly;
}
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		spin_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		spin_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	spin_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}
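
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * read path can use the copy count to bound a mirror retry loop.
 * mirror_num is 1-based here, matching __btrfs_map_block below.
 */
#if 0
static void example_retry_mirrors(struct btrfs_mapping_tree *map_tree,
				  u64 logical, u64 len)
{
	int num_copies = btrfs_num_copies(map_tree, logical, len);
	int mirror_num;

	for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
		/* resubmit the read against this mirror and stop on the
		 * first success; the actual submission is elided */
	}
}
#endif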
static int find_live_mirror(struct map_lookup *map, int first, int num,
			    int optimal)
{
	int i;
	if (map->stripes[optimal].dev->bdev)
		return optimal;
	for (i = first; i < first + num; i++) {
		if (map->stripes[i].dev->bdev)
			return i;
	}
	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_multi_bio **multi_ret,
			     int mirror_num, struct page *unplug_page)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	int num_stripes;
	int max_errors = 0;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && !(rw & (1 << BIO_RW)))
		stripes_allocated = 1;
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;

		atomic_set(&multi->error, 0);
	}

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	spin_unlock(&em_tree->lock);

	if (!em && unplug_page)
		return 0;

	if (!em) {
		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
		       (unsigned long long)logical,
		       (unsigned long long)*length);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	/* if our multi bio struct is too small, back off and try again */
	if (rw & (1 << BIO_RW)) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
			max_errors = 1;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
			max_errors = 1;
		}
	}
	if (multi_ret && (rw & (1 << BIO_RW)) &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		free_extent_map(em);
		kfree(multi);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;
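	/*
	 * Worked example, assuming stripe_len = 64K: for offset = 200K the
	 * do_div above leaves stripe_nr = 3 (200K / 64K), so the first
	 * stripe_offset is 3 * 64K = 192K and the line above turns it into
	 * 200K - 192K = 8K, the position of the block inside its stripe.
	 */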
	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}

	if (!multi_ret && !unplug_page)
		goto out;
	num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, 0,
					    map->num_stripes,
					    current->pid % map->num_stripes);
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (1 << BIO_RW))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, stripe_index,
					      map->sub_stripes, stripe_index +
					      current->pid % map->sub_stripes);
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
	}
	BUG_ON(stripe_index >= map->num_stripes);
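	/*
	 * Worked example for the RAID10 branch above: with num_stripes = 4
	 * and sub_stripes = 2, factor = 2.  For stripe_nr = 5, do_div
	 * stores the quotient 2 back in stripe_nr and returns the
	 * remainder 1, so stripe_index = 1 * 2 = 2: the block sits in the
	 * second mirror pair, two stripes deep on each of its devices.
	 */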
	for (i = 0; i < num_stripes; i++) {
		if (unplug_page) {
			struct btrfs_device *device;
			struct backing_dev_info *bdi;

			device = map->stripes[stripe_index].dev;
			if (device->bdev) {
				bdi = blk_get_backing_dev_info(device->bdev);
				if (bdi->unplug_io_fn)
					bdi->unplug_io_fn(bdi, unplug_page);
			}
		} else {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			multi->stripes[i].dev = map->stripes[stripe_index].dev;
		}
		stripe_index++;
	}
	if (multi_ret) {
		*multi_ret = multi;
		multi->num_stripes = num_stripes;
		multi->max_errors = max_errors;
	}
out:
	free_extent_map(em);
	return 0;
}
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num, NULL);
}
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	int i, j, nr = 0;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	spin_unlock(&em_tree->lock);

	BUG_ON(!em || em->start != chunk_start);
	map = (struct map_lookup *)em->bdev;

	length = em->len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		do_div(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		do_div(length, map->num_stripes);

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
	BUG_ON(!buf);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		do_div(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			do_div(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		bytenr = chunk_start + stripe_nr * map->stripe_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}
	/*
	 * Note: as written this verification pass never runs; the loop
	 * condition is "i > nr" with i starting at 0, so the body is
	 * effectively dead code.
	 */
	for (i = 0; i > nr; i++) {
		struct btrfs_multi_bio *multi;
		struct btrfs_bio_stripe *stripe;
		int ret;

		length = 1;
		ret = btrfs_map_block(map_tree, WRITE, buf[i],
				      &length, &multi, 0);
		BUG_ON(ret);

		stripe = multi->stripes;
		for (j = 0; j < multi->num_stripes; j++) {
			if (stripe->physical >= physical &&
			    physical < stripe->physical + length)
				break;
		}
		BUG_ON(j >= multi->num_stripes);
		kfree(multi);
	}
	*logical = buf;
	*naddrs = nr;
	*stripe_len = map->stripe_len;

	free_extent_map(em);
	return 0;
}
int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
		      u64 logical, struct page *page)
{
	u64 length = PAGE_CACHE_SIZE;
	return __btrfs_map_block(map_tree, READ, logical, &length,
				 NULL, 0, page);
}
static void end_bio_multi_stripe(struct bio *bio, int err)
{
	struct btrfs_multi_bio *multi = bio->bi_private;
	int is_orig_bio = 0;

	if (err)
		atomic_inc(&multi->error);

	if (bio == multi->orig_bio)
		is_orig_bio = 1;

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = multi->orig_bio;
		}
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the multi-bio
		 */
		if (atomic_read(&multi->error) > multi->max_errors) {
			err = -EIO;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(multi);

		bio_endio(bio, err);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}
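
/*
 * Error-tolerance sketch: a RAID1 write fans out to two stripes with
 * max_errors = 1 (set in __btrfs_map_block).  If exactly one completion
 * fails, atomic_read(&multi->error) == 1 is not beyond max_errors, so
 * the original bio is still marked up to date; only a second failure
 * turns the completion into -EIO.
 */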
struct async_sched {
	struct bio *bio;
	int rw;
	struct btrfs_fs_info *info;
	struct btrfs_work work;
};
/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline int schedule_bio(struct btrfs_root *root,
				 struct btrfs_device *device,
				 int rw, struct bio *bio)
{
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & (1 << BIO_RW))) {
		bio_get(bio);
		submit_bio(rw, bio);
		bio_put(bio);
		return 0;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);
	if (bio_sync(bio))
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
	return 0;
}
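
/*
 * Queueing sketch: with two async writes and one sync write pending on a
 * device, the lists built above look like
 *
 *   device->pending_bios:      bioA -> bioB -> NULL
 *   device->pending_sync_bios: bioC -> NULL
 *
 * letting run_scheduled_bios (described earlier in this file) handle
 * sync bios separately from the regular batch.
 */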
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
		       "len %llu\n", (unsigned long long)logical,
		       (unsigned long long)length,
		       (unsigned long long)map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	multi->orig_bio = first_bio;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while (dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;
		BUG_ON(rw == WRITE && !dev->writeable);
		if (dev && dev->bdev) {
			bio->bi_bdev = dev->bdev;
			if (async_submit)
				schedule_bio(root, dev, rw, bio);
			else
				submit_bio(rw, bio);
		} else {
			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
			bio->bi_sector = logical >> 9;
			bio_endio(bio, -EIO);
		}
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}
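
/*
 * Fan-out sketch: a WRITE to a two-stripe RAID1 chunk gives
 * total_devs == 2, so the loop above sends a clone of first_bio to the
 * first device and first_bio itself to the last one, and
 * end_bio_multi_stripe collects both completions before the original
 * end_io callback runs.
 */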
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = root->fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}
static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device)
		return NULL;
	list_add(&device->dev_list,
		 &fs_devices->devices);
	device->barriers = 1;
	device->dev_root = root->fs_info->dev_root;
	device->devid = devid;
	device->work.func = pending_bios_fn;
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;
	spin_lock_init(&device->io_lock);
	INIT_LIST_HEAD(&device->dev_alloc_list);
	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
	return device;
}
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	spin_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
							NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	spin_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	spin_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}
static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	mutex_lock(&uuid_mutex);

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		ret = -ENOENT;
		goto out;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices)) {
		ret = PTR_ERR(fs_devices);
		goto out;
	}

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret)
		goto out;

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		ret = -EINVAL;
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	mutex_unlock(&uuid_mutex);
	return ret;
}
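
/*
 * Sprout/seed chain sketch: after open_seed_devices, the writable
 * sprout's fs_devices stays at the head and each opened seed filesystem
 * is linked in behind it through ->seed:
 *
 *   sprout fs_devices -> seed A fs_devices -> seed B fs_devices -> NULL
 *
 * which is the chain btrfs_find_device walks when matching devid/uuid.
 */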
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret && !btrfs_test_opt(root, DEGRADED))
			return ret;
	}

	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
	if (!device || !device->bdev) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device) {
			printk(KERN_WARNING "warning devid %llu missing\n",
			       (unsigned long long)devid);
			device = add_missing_dev(root, devid, dev_uuid);
			if (!device)
				return -ENOMEM;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->in_fs_metadata = 1;
	if (device->writeable)
		device->fs_devices->total_rw_bytes += device->total_bytes;
	ret = 0;
	return ret;
}
int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(sb, 0);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key); ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}
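
/*
 * Layout sketch of the array walked above: sys_chunk_array is a packed
 * sequence of (btrfs_disk_key, btrfs_chunk + stripes) pairs, e.g. for a
 * single-stripe system chunk:
 *
 *   [disk_key][chunk header][stripe 0][disk_key][chunk header]...
 *
 * which is why cur advances by sizeof(*disk_key) and then by
 * btrfs_chunk_item_size(num_stripes) on every iteration.
 */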
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found.
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				if (ret)
					goto error;
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}