btrfs: create helper btrfs_find_device_by_user_input()
[deliverable/linux.git] / fs / btrfs / volumes.c
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18#include <linux/sched.h>
19#include <linux/bio.h>
20#include <linux/slab.h>
21#include <linux/buffer_head.h>
22#include <linux/blkdev.h>
23#include <linux/random.h>
24#include <linux/iocontext.h>
25#include <linux/capability.h>
26#include <linux/ratelimit.h>
27#include <linux/kthread.h>
28#include <linux/raid/pq.h>
29#include <linux/semaphore.h>
30#include <asm/div64.h>
31#include "ctree.h"
32#include "extent_map.h"
33#include "disk-io.h"
34#include "transaction.h"
35#include "print-tree.h"
36#include "volumes.h"
37#include "raid56.h"
38#include "async-thread.h"
39#include "check-integrity.h"
40#include "rcu-string.h"
41#include "math.h"
42#include "dev-replace.h"
43#include "sysfs.h"
44
45const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
46 [BTRFS_RAID_RAID10] = {
47 .sub_stripes = 2,
48 .dev_stripes = 1,
49 .devs_max = 0, /* 0 == as many as possible */
50 .devs_min = 4,
51 .tolerated_failures = 1,
52 .devs_increment = 2,
53 .ncopies = 2,
54 },
55 [BTRFS_RAID_RAID1] = {
56 .sub_stripes = 1,
57 .dev_stripes = 1,
58 .devs_max = 2,
59 .devs_min = 2,
60 .tolerated_failures = 1,
61 .devs_increment = 2,
62 .ncopies = 2,
63 },
64 [BTRFS_RAID_DUP] = {
65 .sub_stripes = 1,
66 .dev_stripes = 2,
67 .devs_max = 1,
68 .devs_min = 1,
69 .tolerated_failures = 0,
70 .devs_increment = 1,
71 .ncopies = 2,
72 },
73 [BTRFS_RAID_RAID0] = {
74 .sub_stripes = 1,
75 .dev_stripes = 1,
76 .devs_max = 0,
77 .devs_min = 2,
78 .tolerated_failures = 0,
79 .devs_increment = 1,
80 .ncopies = 1,
81 },
82 [BTRFS_RAID_SINGLE] = {
83 .sub_stripes = 1,
84 .dev_stripes = 1,
85 .devs_max = 1,
86 .devs_min = 1,
87 .tolerated_failures = 0,
88 .devs_increment = 1,
89 .ncopies = 1,
90 },
91 [BTRFS_RAID_RAID5] = {
92 .sub_stripes = 1,
93 .dev_stripes = 1,
94 .devs_max = 0,
95 .devs_min = 2,
96 .tolerated_failures = 1,
97 .devs_increment = 1,
98 .ncopies = 2,
99 },
100 [BTRFS_RAID_RAID6] = {
101 .sub_stripes = 1,
102 .dev_stripes = 1,
103 .devs_max = 0,
104 .devs_min = 3,
105 .tolerated_failures = 2,
106 .devs_increment = 1,
107 .ncopies = 3,
108 },
109};
110
111const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
112 [BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
113 [BTRFS_RAID_RAID1] = BTRFS_BLOCK_GROUP_RAID1,
114 [BTRFS_RAID_DUP] = BTRFS_BLOCK_GROUP_DUP,
115 [BTRFS_RAID_RAID0] = BTRFS_BLOCK_GROUP_RAID0,
116 [BTRFS_RAID_SINGLE] = 0,
117 [BTRFS_RAID_RAID5] = BTRFS_BLOCK_GROUP_RAID5,
118 [BTRFS_RAID_RAID6] = BTRFS_BLOCK_GROUP_RAID6,
119};
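
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * Both tables above are indexed by the BTRFS_RAID_* enum, so a profile's
 * attributes and its block group flag are looked up like:
 *
 *	int copies = btrfs_raid_array[BTRFS_RAID_RAID1].ncopies;  // == 2
 *	u64 bg_flag = btrfs_raid_group[BTRFS_RAID_RAID1];
 *					// == BTRFS_BLOCK_GROUP_RAID1
 *
 * The chunk allocator consults the same fields (devs_min, devs_max,
 * devs_increment) when validating and sizing a new chunk.
 */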
120
121static int init_first_rw_device(struct btrfs_trans_handle *trans,
122 struct btrfs_root *root,
123 struct btrfs_device *device);
124static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
125static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
126static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
127static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
128static void btrfs_close_one_device(struct btrfs_device *device);
129
130DEFINE_MUTEX(uuid_mutex);
131static LIST_HEAD(fs_uuids);
132struct list_head *btrfs_get_fs_uuids(void)
133{
134 return &fs_uuids;
135}
136
137static struct btrfs_fs_devices *__alloc_fs_devices(void)
138{
139 struct btrfs_fs_devices *fs_devs;
140
141 fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
142 if (!fs_devs)
143 return ERR_PTR(-ENOMEM);
144
145 mutex_init(&fs_devs->device_list_mutex);
146
147 INIT_LIST_HEAD(&fs_devs->devices);
148 INIT_LIST_HEAD(&fs_devs->resized_devices);
149 INIT_LIST_HEAD(&fs_devs->alloc_list);
150 INIT_LIST_HEAD(&fs_devs->list);
151
152 return fs_devs;
153}
154
155/**
156 * alloc_fs_devices - allocate struct btrfs_fs_devices
157 * @fsid: a pointer to UUID for this FS. If NULL a new UUID is
158 * generated.
159 *
160 * Return: a pointer to a new &struct btrfs_fs_devices on success;
161 * ERR_PTR() on error. Returned struct is not linked onto any lists and
162 * can be destroyed with kfree() right away.
163 */
164static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
165{
166 struct btrfs_fs_devices *fs_devs;
167
168 fs_devs = __alloc_fs_devices();
169 if (IS_ERR(fs_devs))
170 return fs_devs;
171
172 if (fsid)
173 memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
174 else
175 generate_random_uuid(fs_devs->fsid);
176
177 return fs_devs;
178}
179
180static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
181{
182 struct btrfs_device *device;
183 WARN_ON(fs_devices->opened);
184 while (!list_empty(&fs_devices->devices)) {
185 device = list_entry(fs_devices->devices.next,
186 struct btrfs_device, dev_list);
187 list_del(&device->dev_list);
188 rcu_string_free(device->name);
189 kfree(device);
190 }
191 kfree(fs_devices);
192}
193
194static void btrfs_kobject_uevent(struct block_device *bdev,
195 enum kobject_action action)
196{
197 int ret;
198
199 ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
200 if (ret)
201 pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
202 action,
203 kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
204 &disk_to_dev(bdev->bd_disk)->kobj);
205}
206
207void btrfs_cleanup_fs_uuids(void)
208{
209 struct btrfs_fs_devices *fs_devices;
210
211 while (!list_empty(&fs_uuids)) {
212 fs_devices = list_entry(fs_uuids.next,
213 struct btrfs_fs_devices, list);
214 list_del(&fs_devices->list);
215 free_fs_devices(fs_devices);
216 }
217}
218
219static struct btrfs_device *__alloc_device(void)
220{
221 struct btrfs_device *dev;
222
223 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
224 if (!dev)
225 return ERR_PTR(-ENOMEM);
226
227 INIT_LIST_HEAD(&dev->dev_list);
228 INIT_LIST_HEAD(&dev->dev_alloc_list);
229 INIT_LIST_HEAD(&dev->resized_list);
230
231 spin_lock_init(&dev->io_lock);
232
233 spin_lock_init(&dev->reada_lock);
234 atomic_set(&dev->reada_in_flight, 0);
235 atomic_set(&dev->dev_stats_ccnt, 0);
236 btrfs_device_data_ordered_init(dev);
237 INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
238 INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
239
240 return dev;
241}
242
243static noinline struct btrfs_device *__find_device(struct list_head *head,
244 u64 devid, u8 *uuid)
245{
246 struct btrfs_device *dev;
247
248 list_for_each_entry(dev, head, dev_list) {
249 if (dev->devid == devid &&
250 (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
251 return dev;
252 }
253 }
254 return NULL;
255}
256
257static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
258{
259 struct btrfs_fs_devices *fs_devices;
260
261 list_for_each_entry(fs_devices, &fs_uuids, list) {
262 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
263 return fs_devices;
264 }
265 return NULL;
266}
267
268static int
269btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
270 int flush, struct block_device **bdev,
271 struct buffer_head **bh)
272{
273 int ret;
274
275 *bdev = blkdev_get_by_path(device_path, flags, holder);
276
277 if (IS_ERR(*bdev)) {
278 ret = PTR_ERR(*bdev);
279 goto error;
280 }
281
282 if (flush)
283 filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
284 ret = set_blocksize(*bdev, 4096);
285 if (ret) {
286 blkdev_put(*bdev, flags);
287 goto error;
288 }
289 invalidate_bdev(*bdev);
290 *bh = btrfs_read_dev_super(*bdev);
291 if (IS_ERR(*bh)) {
292 ret = PTR_ERR(*bh);
293 blkdev_put(*bdev, flags);
294 goto error;
295 }
296
297 return 0;
298
299error:
300 *bdev = NULL;
301 *bh = NULL;
302 return ret;
303}
304
305static void requeue_list(struct btrfs_pending_bios *pending_bios,
306 struct bio *head, struct bio *tail)
307{
308
309 struct bio *old_head;
310
311 old_head = pending_bios->head;
312 pending_bios->head = head;
313 if (pending_bios->tail)
314 tail->bi_next = old_head;
315 else
316 pending_bios->tail = tail;
317}
318
319/*
320 * we try to collect pending bios for a device so we don't get a large
321 * number of procs sending bios down to the same device. This greatly
322 * improves the scheduler's ability to collect and merge the bios.
323 *
324 * But, it also turns into a long list of bios to process and that is sure
325 * to eventually make the worker thread block. The solution here is to
326 * make some progress and then put this work struct back at the end of
327 * the list if the block device is congested. This way, multiple devices
328 * can make progress from a single worker thread.
329 */
330static noinline void run_scheduled_bios(struct btrfs_device *device)
331{
332 struct bio *pending;
333 struct backing_dev_info *bdi;
334 struct btrfs_fs_info *fs_info;
335 struct btrfs_pending_bios *pending_bios;
336 struct bio *tail;
337 struct bio *cur;
338 int again = 0;
339 unsigned long num_run;
340 unsigned long batch_run = 0;
341 unsigned long limit;
342 unsigned long last_waited = 0;
343 int force_reg = 0;
344 int sync_pending = 0;
345 struct blk_plug plug;
346
347 /*
348 * this function runs all the bios we've collected for
349 * a particular device. We don't want to wander off to
350 * another device without first sending all of these down.
351 * So, set up a plug here and finish it off before we return
352 */
353 blk_start_plug(&plug);
354
355 bdi = blk_get_backing_dev_info(device->bdev);
356 fs_info = device->dev_root->fs_info;
357 limit = btrfs_async_submit_limit(fs_info);
358 limit = limit * 2 / 3;
359
360loop:
361 spin_lock(&device->io_lock);
362
363loop_lock:
364 num_run = 0;
365
366 /* take all the bios off the list at once and process them
367 * later on (without the lock held). But, remember the
368 * tail and other pointers so the bios can be properly reinserted
369 * into the list if we hit congestion
370 */
371 if (!force_reg && device->pending_sync_bios.head) {
372 pending_bios = &device->pending_sync_bios;
373 force_reg = 1;
374 } else {
375 pending_bios = &device->pending_bios;
376 force_reg = 0;
377 }
378
379 pending = pending_bios->head;
380 tail = pending_bios->tail;
381 WARN_ON(pending && !tail);
382
383 /*
384 * if pending was null this time around, no bios need processing
385 * at all and we can stop. Otherwise it'll loop back up again
386 * and do an additional check so no bios are missed.
387 *
388 * device->running_pending is used to synchronize with the
389 * schedule_bio code.
390 */
391 if (device->pending_sync_bios.head == NULL &&
392 device->pending_bios.head == NULL) {
393 again = 0;
394 device->running_pending = 0;
395 } else {
396 again = 1;
397 device->running_pending = 1;
398 }
399
400 pending_bios->head = NULL;
401 pending_bios->tail = NULL;
402
403 spin_unlock(&device->io_lock);
404
405 while (pending) {
406
407 rmb();
408 /* we want to work on both lists, but do more bios on the
409 * sync list than the regular list
410 */
411 if ((num_run > 32 &&
412 pending_bios != &device->pending_sync_bios &&
413 device->pending_sync_bios.head) ||
414 (num_run > 64 && pending_bios == &device->pending_sync_bios &&
415 device->pending_bios.head)) {
416 spin_lock(&device->io_lock);
417 requeue_list(pending_bios, pending, tail);
418 goto loop_lock;
419 }
420
421 cur = pending;
422 pending = pending->bi_next;
423 cur->bi_next = NULL;
424
425 /*
426 * atomic_dec_return implies a barrier for waitqueue_active
427 */
428 if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
429 waitqueue_active(&fs_info->async_submit_wait))
430 wake_up(&fs_info->async_submit_wait);
431
432 BUG_ON(atomic_read(&cur->__bi_cnt) == 0);
433
434 /*
435 * if we're doing the sync list, record that our
436 * plug has some sync requests on it
437 *
438 * If we're doing the regular list and there are
439 * sync requests sitting around, unplug before
440 * we add more
441 */
442 if (pending_bios == &device->pending_sync_bios) {
443 sync_pending = 1;
444 } else if (sync_pending) {
445 blk_finish_plug(&plug);
446 blk_start_plug(&plug);
447 sync_pending = 0;
448 }
449
450 btrfsic_submit_bio(cur->bi_rw, cur);
451 num_run++;
452 batch_run++;
453
454 cond_resched();
455
456 /*
457 * we made progress, there is more work to do and the bdi
458 * is now congested. Back off and let other work structs
459 * run instead
460 */
461 if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
462 fs_info->fs_devices->open_devices > 1) {
463 struct io_context *ioc;
464
465 ioc = current->io_context;
466
467 /*
468 * the main goal here is that we don't want to
469 * block if we're going to be able to submit
470 * more requests without blocking.
471 *
472 * This code does two great things: it pokes into
473 * the elevator code from a filesystem _and_
474 * it makes assumptions about how batching works.
475 */
476 if (ioc && ioc->nr_batch_requests > 0 &&
477 time_before(jiffies, ioc->last_waited + HZ/50UL) &&
478 (last_waited == 0 ||
479 ioc->last_waited == last_waited)) {
480 /*
481 * we want to go through our batch of
482 * requests and stop. So, we copy out
483 * the ioc->last_waited time and test
484 * against it before looping
485 */
486 last_waited = ioc->last_waited;
487 cond_resched();
488 continue;
489 }
490 spin_lock(&device->io_lock);
491 requeue_list(pending_bios, pending, tail);
492 device->running_pending = 1;
493
494 spin_unlock(&device->io_lock);
495 btrfs_queue_work(fs_info->submit_workers,
496 &device->work);
497 goto done;
498 }
499 /* unplug every 64 requests just for good measure */
500 if (batch_run % 64 == 0) {
501 blk_finish_plug(&plug);
502 blk_start_plug(&plug);
503 sync_pending = 0;
504 }
505 }
506
507 cond_resched();
508 if (again)
509 goto loop;
510
511 spin_lock(&device->io_lock);
512 if (device->pending_bios.head || device->pending_sync_bios.head)
513 goto loop_lock;
514 spin_unlock(&device->io_lock);
515
516done:
517 blk_finish_plug(&plug);
518}
519
520static void pending_bios_fn(struct btrfs_work *work)
521{
522 struct btrfs_device *device;
523
524 device = container_of(work, struct btrfs_device, work);
525 run_scheduled_bios(device);
526}
527
528
529void btrfs_free_stale_device(struct btrfs_device *cur_dev)
530{
531 struct btrfs_fs_devices *fs_devs;
532 struct btrfs_device *dev;
533
534 if (!cur_dev->name)
535 return;
536
537 list_for_each_entry(fs_devs, &fs_uuids, list) {
538 int del = 1;
539
540 if (fs_devs->opened)
541 continue;
542 if (fs_devs->seeding)
543 continue;
544
545 list_for_each_entry(dev, &fs_devs->devices, dev_list) {
546
547 if (dev == cur_dev)
548 continue;
549 if (!dev->name)
550 continue;
551
552 /*
553 * TODO: This won't be enough. What if the same device
554 * comes back (with a new uuid) under its mapper path?
555 * But for now this does help, as an admin will mostly
556 * use either the mapper or the non-mapper path throughout.
557 */
558 rcu_read_lock();
559 del = strcmp(rcu_str_deref(dev->name),
560 rcu_str_deref(cur_dev->name));
561 rcu_read_unlock();
562 if (!del)
563 break;
564 }
565
566 if (!del) {
567 /* delete the stale device */
568 if (fs_devs->num_devices == 1) {
569 btrfs_sysfs_remove_fsid(fs_devs);
570 list_del(&fs_devs->list);
571 free_fs_devices(fs_devs);
572 } else {
573 fs_devs->num_devices--;
574 list_del(&dev->dev_list);
575 rcu_string_free(dev->name);
576 kfree(dev);
577 }
578 break;
579 }
580 }
581}
582
583/*
584 * Add new device to list of registered devices
585 *
586 * Returns:
587 * 1 - first time device is seen
588 * 0 - device already known
589 * < 0 - error
590 */
591static noinline int device_list_add(const char *path,
592 struct btrfs_super_block *disk_super,
593 u64 devid, struct btrfs_fs_devices **fs_devices_ret)
594{
595 struct btrfs_device *device;
596 struct btrfs_fs_devices *fs_devices;
597 struct rcu_string *name;
598 int ret = 0;
599 u64 found_transid = btrfs_super_generation(disk_super);
600
601 fs_devices = find_fsid(disk_super->fsid);
602 if (!fs_devices) {
603 fs_devices = alloc_fs_devices(disk_super->fsid);
604 if (IS_ERR(fs_devices))
605 return PTR_ERR(fs_devices);
606
607 list_add(&fs_devices->list, &fs_uuids);
608
609 device = NULL;
610 } else {
611 device = __find_device(&fs_devices->devices, devid,
612 disk_super->dev_item.uuid);
613 }
614
615 if (!device) {
616 if (fs_devices->opened)
617 return -EBUSY;
618
619 device = btrfs_alloc_device(NULL, &devid,
620 disk_super->dev_item.uuid);
621 if (IS_ERR(device)) {
622 /* we can safely leave the fs_devices entry around */
623 return PTR_ERR(device);
624 }
625
626 name = rcu_string_strdup(path, GFP_NOFS);
627 if (!name) {
628 kfree(device);
629 return -ENOMEM;
630 }
631 rcu_assign_pointer(device->name, name);
632
633 mutex_lock(&fs_devices->device_list_mutex);
634 list_add_rcu(&device->dev_list, &fs_devices->devices);
635 fs_devices->num_devices++;
636 mutex_unlock(&fs_devices->device_list_mutex);
637
638 ret = 1;
639 device->fs_devices = fs_devices;
640 } else if (!device->name || strcmp(device->name->str, path)) {
641 /*
642 * The FS is already mounted in this case.
643 * 1. If you are here and the device->name is NULL, that
644 * means this device was missing at the time of FS mount.
645 * 2. If you are here and the device->name is different
646 * from 'path', that means either
647 * a. The same device disappeared and reappeared with a
648 * different name, or
649 * b. The missing-disk-which-was-replaced has
650 * reappeared now.
651 *
652 * We must allow 1 and 2a above. But 2b would be spurious
653 * and unintentional.
654 *
655 * Further, in cases 1 and 2a above, the disk at 'path'
656 * would have missed some transactions while it was away, and
657 * in case 2a the stale bdev has to be updated as well.
658 * 2b must not be allowed at any time.
659 */
660
661 /*
662 * For now, we do allow updates to btrfs_fs_devices through the
663 * btrfs dev scan CLI after the FS has been mounted. We're still
664 * tracking a problem where systems fail to mount by subvolume id
665 * when we reject replacement on a mounted FS.
666 */
667 if (!fs_devices->opened && found_transid < device->generation) {
668 /*
669 * That is, if the FS is _not_ mounted and you
670 * are here, there is more than one disk with
671 * the same uuid and devid. We keep the one with
672 * the larger generation number, or the last-in
673 * if generations are equal.
674 */
675 return -EEXIST;
676 }
677
678 name = rcu_string_strdup(path, GFP_NOFS);
679 if (!name)
680 return -ENOMEM;
681 rcu_string_free(device->name);
682 rcu_assign_pointer(device->name, name);
683 if (device->missing) {
684 fs_devices->missing_devices--;
685 device->missing = 0;
686 }
687 }
688
689 /*
690 * Unmount does not free the btrfs_device struct but would zero
691 * generation along with most of the other members. So just update
692 * it back. We need it to pick the disk with the largest generation
693 * (as above).
694 */
695 if (!fs_devices->opened)
696 device->generation = found_transid;
697
698 /*
699 * if there is new btrfs on an already registered device,
700 * then remove the stale device entry.
701 */
702 btrfs_free_stale_device(device);
703
704 *fs_devices_ret = fs_devices;
705
706 return ret;
707}
708
709static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
710{
711 struct btrfs_fs_devices *fs_devices;
712 struct btrfs_device *device;
713 struct btrfs_device *orig_dev;
714
715 fs_devices = alloc_fs_devices(orig->fsid);
716 if (IS_ERR(fs_devices))
717 return fs_devices;
718
719 mutex_lock(&orig->device_list_mutex);
720 fs_devices->total_devices = orig->total_devices;
721
722 /* We hold the volume lock; it is safe to get the devices. */
723 list_for_each_entry(orig_dev, &orig->devices, dev_list) {
724 struct rcu_string *name;
725
726 device = btrfs_alloc_device(NULL, &orig_dev->devid,
727 orig_dev->uuid);
728 if (IS_ERR(device))
729 goto error;
730
731 /*
732 * This is ok to do without rcu read locked because we hold the
733 * uuid mutex so nothing we touch in here is going to disappear.
734 */
735 if (orig_dev->name) {
736 name = rcu_string_strdup(orig_dev->name->str,
737 GFP_KERNEL);
738 if (!name) {
739 kfree(device);
740 goto error;
741 }
742 rcu_assign_pointer(device->name, name);
743 }
744
745 list_add(&device->dev_list, &fs_devices->devices);
746 device->fs_devices = fs_devices;
747 fs_devices->num_devices++;
748 }
749 mutex_unlock(&orig->device_list_mutex);
750 return fs_devices;
751error:
752 mutex_unlock(&orig->device_list_mutex);
753 free_fs_devices(fs_devices);
754 return ERR_PTR(-ENOMEM);
755}
756
757void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
758{
759 struct btrfs_device *device, *next;
760 struct btrfs_device *latest_dev = NULL;
761
762 mutex_lock(&uuid_mutex);
763again:
764 /* This is the initialized path; it is safe to release the devices. */
765 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
766 if (device->in_fs_metadata) {
767 if (!device->is_tgtdev_for_dev_replace &&
768 (!latest_dev ||
769 device->generation > latest_dev->generation)) {
770 latest_dev = device;
771 }
772 continue;
773 }
774
775 if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
776 /*
777 * In the first step, keep the device which has
778 * the correct fsid and the devid that is used
779 * for the dev_replace procedure.
780 * In the second step, the dev_replace state is
781 * read from the device tree and it is known
782 * whether the procedure is really active or
783 * not, which means whether this device is
784 * used or whether it should be removed.
785 */
786 if (step == 0 || device->is_tgtdev_for_dev_replace) {
787 continue;
788 }
789 }
790 if (device->bdev) {
791 blkdev_put(device->bdev, device->mode);
792 device->bdev = NULL;
793 fs_devices->open_devices--;
794 }
795 if (device->writeable) {
796 list_del_init(&device->dev_alloc_list);
797 device->writeable = 0;
798 if (!device->is_tgtdev_for_dev_replace)
799 fs_devices->rw_devices--;
800 }
801 list_del_init(&device->dev_list);
802 fs_devices->num_devices--;
803 rcu_string_free(device->name);
804 kfree(device);
805 }
806
807 if (fs_devices->seed) {
808 fs_devices = fs_devices->seed;
809 goto again;
810 }
811
812 fs_devices->latest_bdev = latest_dev->bdev;
813
814 mutex_unlock(&uuid_mutex);
815}
816
817static void __free_device(struct work_struct *work)
818{
819 struct btrfs_device *device;
820
821 device = container_of(work, struct btrfs_device, rcu_work);
822
823 if (device->bdev)
824 blkdev_put(device->bdev, device->mode);
825
826 rcu_string_free(device->name);
827 kfree(device);
828}
829
830static void free_device(struct rcu_head *head)
831{
832 struct btrfs_device *device;
833
834 device = container_of(head, struct btrfs_device, rcu);
835
836 INIT_WORK(&device->rcu_work, __free_device);
837 schedule_work(&device->rcu_work);
838}
839
840static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
841{
842 struct btrfs_device *device, *tmp;
843
844 if (--fs_devices->opened > 0)
845 return 0;
846
847 mutex_lock(&fs_devices->device_list_mutex);
848 list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
849 btrfs_close_one_device(device);
850 }
851 mutex_unlock(&fs_devices->device_list_mutex);
852
853 WARN_ON(fs_devices->open_devices);
854 WARN_ON(fs_devices->rw_devices);
855 fs_devices->opened = 0;
856 fs_devices->seeding = 0;
857
858 return 0;
859}
860
861int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
862{
863 struct btrfs_fs_devices *seed_devices = NULL;
864 int ret;
865
866 mutex_lock(&uuid_mutex);
867 ret = __btrfs_close_devices(fs_devices);
868 if (!fs_devices->opened) {
869 seed_devices = fs_devices->seed;
870 fs_devices->seed = NULL;
871 }
872 mutex_unlock(&uuid_mutex);
873
874 while (seed_devices) {
875 fs_devices = seed_devices;
876 seed_devices = fs_devices->seed;
877 __btrfs_close_devices(fs_devices);
878 free_fs_devices(fs_devices);
879 }
880 /*
881 * Wait for rcu kworkers under __btrfs_close_devices
882 * to finish all blkdev_puts so the device is really
883 * free when umount is done.
884 */
885 rcu_barrier();
886 return ret;
887}
888
889static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
890 fmode_t flags, void *holder)
891{
892 struct request_queue *q;
893 struct block_device *bdev;
894 struct list_head *head = &fs_devices->devices;
895 struct btrfs_device *device;
896 struct btrfs_device *latest_dev = NULL;
897 struct buffer_head *bh;
898 struct btrfs_super_block *disk_super;
899 u64 devid;
900 int seeding = 1;
901 int ret = 0;
902
903 flags |= FMODE_EXCL;
904
905 list_for_each_entry(device, head, dev_list) {
906 if (device->bdev)
907 continue;
908 if (!device->name)
909 continue;
910
911 /* Just open everything we can; ignore failures here */
912 if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
913 &bdev, &bh))
914 continue;
915
916 disk_super = (struct btrfs_super_block *)bh->b_data;
917 devid = btrfs_stack_device_id(&disk_super->dev_item);
918 if (devid != device->devid)
919 goto error_brelse;
920
921 if (memcmp(device->uuid, disk_super->dev_item.uuid,
922 BTRFS_UUID_SIZE))
923 goto error_brelse;
924
925 device->generation = btrfs_super_generation(disk_super);
926 if (!latest_dev ||
927 device->generation > latest_dev->generation)
928 latest_dev = device;
929
930 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
931 device->writeable = 0;
932 } else {
933 device->writeable = !bdev_read_only(bdev);
934 seeding = 0;
935 }
936
937 q = bdev_get_queue(bdev);
938 if (blk_queue_discard(q))
939 device->can_discard = 1;
940
941 device->bdev = bdev;
942 device->in_fs_metadata = 0;
943 device->mode = flags;
944
945 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
946 fs_devices->rotating = 1;
947
948 fs_devices->open_devices++;
949 if (device->writeable &&
950 device->devid != BTRFS_DEV_REPLACE_DEVID) {
951 fs_devices->rw_devices++;
952 list_add(&device->dev_alloc_list,
953 &fs_devices->alloc_list);
954 }
955 brelse(bh);
956 continue;
957
958error_brelse:
959 brelse(bh);
960 blkdev_put(bdev, flags);
961 continue;
962 }
963 if (fs_devices->open_devices == 0) {
964 ret = -EINVAL;
965 goto out;
966 }
967 fs_devices->seeding = seeding;
968 fs_devices->opened = 1;
969 fs_devices->latest_bdev = latest_dev->bdev;
970 fs_devices->total_rw_bytes = 0;
971out:
972 return ret;
973}
974
975int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
976 fmode_t flags, void *holder)
977{
978 int ret;
979
980 mutex_lock(&uuid_mutex);
981 if (fs_devices->opened) {
982 fs_devices->opened++;
983 ret = 0;
984 } else {
985 ret = __btrfs_open_devices(fs_devices, flags, holder);
986 }
987 mutex_unlock(&uuid_mutex);
988 return ret;
989}
990
991void btrfs_release_disk_super(struct page *page)
992{
993 kunmap(page);
994 put_page(page);
995}
996
997int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
998 struct page **page, struct btrfs_super_block **disk_super)
999{
1000 void *p;
1001 pgoff_t index;
1002
1003 /* make sure our super fits in the device */
1004 if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
1005 return 1;
1006
1007 /* make sure our super fits in the page */
1008 if (sizeof(**disk_super) > PAGE_SIZE)
1009 return 1;
1010
1011 /* make sure our super doesn't straddle pages on disk */
1012 index = bytenr >> PAGE_SHIFT;
1013 if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
1014 return 1;
1015
1016 /* pull in the page with our super */
1017 *page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
1018 index, GFP_KERNEL);
1019
1020 if (IS_ERR_OR_NULL(*page))
1021 return 1;
1022
1023 p = kmap(*page);
1024
1025 /* align our pointer to the offset of the super block */
1026 *disk_super = p + (bytenr & ~PAGE_MASK);
1027
1028 if (btrfs_super_bytenr(*disk_super) != bytenr ||
1029 btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
1030 btrfs_release_disk_super(*page);
1031 return 1;
1032 }
1033
1034 if ((*disk_super)->label[0] &&
1035 (*disk_super)->label[BTRFS_LABEL_SIZE - 1])
1036 (*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';
1037
1038 return 0;
1039}
1040
1041/*
1042 * Look for a btrfs signature on a device. This may be called out of the mount path
1043 * and we are not allowed to call set_blocksize during the scan. The superblock
1044 * is read via the pagecache.
1045 */
1046int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
1047 struct btrfs_fs_devices **fs_devices_ret)
1048{
1049 struct btrfs_super_block *disk_super;
1050 struct block_device *bdev;
1051 struct page *page;
1052 int ret = -EINVAL;
1053 u64 devid;
1054 u64 transid;
1055 u64 total_devices;
1056 u64 bytenr;
1057
1058 /*
1059 * we would like to check all the supers, but that would make
1060 * a btrfs mount succeed after a mkfs from a different FS.
1061 * So, we need to add a special mount option to scan for
1062 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
1063 */
1064 bytenr = btrfs_sb_offset(0);
1065 flags |= FMODE_EXCL;
1066 mutex_lock(&uuid_mutex);
1067
1068 bdev = blkdev_get_by_path(path, flags, holder);
1069 if (IS_ERR(bdev)) {
1070 ret = PTR_ERR(bdev);
1071 goto error;
1072 }
1073
1074 if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super))
1075 goto error_bdev_put;
1076
1077 devid = btrfs_stack_device_id(&disk_super->dev_item);
1078 transid = btrfs_super_generation(disk_super);
1079 total_devices = btrfs_super_num_devices(disk_super);
1080
1081 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
1082 if (ret > 0) {
1083 if (disk_super->label[0]) {
1084 printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
1085 } else {
1086 printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
1087 }
1088
1089 printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
1090 ret = 0;
1091 }
1092 if (!ret && fs_devices_ret)
1093 (*fs_devices_ret)->total_devices = total_devices;
1094
1095 btrfs_release_disk_super(page);
1096
1097error_bdev_put:
1098 blkdev_put(bdev, flags);
1099error:
1100 mutex_unlock(&uuid_mutex);
1101 return ret;
1102}
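
/*
 * Editor's note: hedged context sketch, not in the original source. The
 * scan above is typically reached from user space: "btrfs device scan"
 * issues the BTRFS_IOC_SCAN_DEV ioctl on /dev/btrfs-control, roughly:
 *
 *	struct btrfs_ioctl_vol_args args = {0};
 *
 *	strncpy(args.name, "/dev/sdb", BTRFS_PATH_NAME_MAX);
 *	ioctl(control_fd, BTRFS_IOC_SCAN_DEV, &args);
 *
 * btrfs_control_ioctl() then calls btrfs_scan_one_device() with
 * FMODE_READ and the filesystem type as holder.
 */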
1103
1104/* helper to account the used device space in the range */
1105int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
1106 u64 end, u64 *length)
1107{
1108 struct btrfs_key key;
1109 struct btrfs_root *root = device->dev_root;
1110 struct btrfs_dev_extent *dev_extent;
1111 struct btrfs_path *path;
1112 u64 extent_end;
1113 int ret;
1114 int slot;
1115 struct extent_buffer *l;
1116
1117 *length = 0;
1118
1119 if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
1120 return 0;
1121
1122 path = btrfs_alloc_path();
1123 if (!path)
1124 return -ENOMEM;
1125 path->reada = READA_FORWARD;
1126
1127 key.objectid = device->devid;
1128 key.offset = start;
1129 key.type = BTRFS_DEV_EXTENT_KEY;
1130
1131 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1132 if (ret < 0)
1133 goto out;
1134 if (ret > 0) {
1135 ret = btrfs_previous_item(root, path, key.objectid, key.type);
1136 if (ret < 0)
1137 goto out;
1138 }
1139
1140 while (1) {
1141 l = path->nodes[0];
1142 slot = path->slots[0];
1143 if (slot >= btrfs_header_nritems(l)) {
1144 ret = btrfs_next_leaf(root, path);
1145 if (ret == 0)
1146 continue;
1147 if (ret < 0)
1148 goto out;
1149
1150 break;
1151 }
1152 btrfs_item_key_to_cpu(l, &key, slot);
1153
1154 if (key.objectid < device->devid)
1155 goto next;
1156
1157 if (key.objectid > device->devid)
1158 break;
1159
1160 if (key.type != BTRFS_DEV_EXTENT_KEY)
1161 goto next;
1162
1163 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1164 extent_end = key.offset + btrfs_dev_extent_length(l,
1165 dev_extent);
1166 if (key.offset <= start && extent_end > end) {
1167 *length = end - start + 1;
1168 break;
1169 } else if (key.offset <= start && extent_end > start)
1170 *length += extent_end - start;
1171 else if (key.offset > start && extent_end <= end)
1172 *length += extent_end - key.offset;
1173 else if (key.offset > start && key.offset <= end) {
1174 *length += end - key.offset + 1;
1175 break;
1176 } else if (key.offset > end)
1177 break;
1178
1179next:
1180 path->slots[0]++;
1181 }
1182 ret = 0;
1183out:
1184 btrfs_free_path(path);
1185 return ret;
1186}
1187
1188static int contains_pending_extent(struct btrfs_transaction *transaction,
1189 struct btrfs_device *device,
1190 u64 *start, u64 len)
1191{
1192 struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
1193 struct extent_map *em;
1194 struct list_head *search_list = &fs_info->pinned_chunks;
1195 int ret = 0;
1196 u64 physical_start = *start;
1197
1198 if (transaction)
1199 search_list = &transaction->pending_chunks;
1200again:
1201 list_for_each_entry(em, search_list, list) {
1202 struct map_lookup *map;
1203 int i;
1204
1205 map = em->map_lookup;
1206 for (i = 0; i < map->num_stripes; i++) {
1207 u64 end;
1208
1209 if (map->stripes[i].dev != device)
1210 continue;
1211 if (map->stripes[i].physical >= physical_start + len ||
1212 map->stripes[i].physical + em->orig_block_len <=
1213 physical_start)
1214 continue;
1215 /*
1216 * Make sure that while processing the pinned list we do
1217 * not override our *start with a lower value, because
1218 * we can have pinned chunks that fall within this
1219 * device hole and that have lower physical addresses
1220 * than the pending chunks we processed before. If we
1221 * do not take this special care we can end up getting
1222 * 2 pending chunks that start at the same physical
1223 * device offsets because the end offset of a pinned
1224 * chunk can be equal to the start offset of some
1225 * pending chunk.
1226 */
1227 end = map->stripes[i].physical + em->orig_block_len;
1228 if (end > *start) {
1229 *start = end;
1230 ret = 1;
1231 }
1232 }
1233 }
1234 if (search_list != &fs_info->pinned_chunks) {
1235 search_list = &fs_info->pinned_chunks;
1236 goto again;
1237 }
1238
1239 return ret;
1240}
1241
1242
1243/*
1244 * find_free_dev_extent_start - find free space in the specified device
1245 * @device: the device which we search the free space in
1246 * @num_bytes: the size of the free space that we need
1247 * @search_start: the position from which to begin the search
1248 * @start: store the start of the free space.
1249 * @len: the size of the free space that we find, or the size
1250 * of the max free space if we don't find suitable free space
1251 *
1252 * this uses a pretty simple search, the expectation is that it is
1253 * called very infrequently and that a given device has a small number
1254 * of extents
1255 *
1256 * @start is used to store the start of the free space if we find it. But if we
1257 * don't find suitable free space, it will be used to store the start position
1258 * of the max free space.
1259 *
1260 * @len is used to store the size of the free space that we find.
1261 * But if we don't find suitable free space, it is used to store the size of
1262 * the max free space.
1263 */
1264int find_free_dev_extent_start(struct btrfs_transaction *transaction,
1265 struct btrfs_device *device, u64 num_bytes,
1266 u64 search_start, u64 *start, u64 *len)
1267{
1268 struct btrfs_key key;
1269 struct btrfs_root *root = device->dev_root;
1270 struct btrfs_dev_extent *dev_extent;
1271 struct btrfs_path *path;
1272 u64 hole_size;
1273 u64 max_hole_start;
1274 u64 max_hole_size;
1275 u64 extent_end;
1276 u64 search_end = device->total_bytes;
1277 int ret;
1278 int slot;
1279 struct extent_buffer *l;
1280 u64 min_search_start;
1281
1282 /*
1283 * We don't want to overwrite the superblock on the drive nor any area
1284 * used by the boot loader (grub for example), so we make sure to start
1285 * at an offset of at least 1MB.
1286 */
1287 min_search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
1288 search_start = max(search_start, min_search_start);
1289
1290 path = btrfs_alloc_path();
1291 if (!path)
1292 return -ENOMEM;
1293
1294 max_hole_start = search_start;
1295 max_hole_size = 0;
1296
1297again:
1298 if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
1299 ret = -ENOSPC;
1300 goto out;
1301 }
1302
1303 path->reada = READA_FORWARD;
1304 path->search_commit_root = 1;
1305 path->skip_locking = 1;
1306
1307 key.objectid = device->devid;
1308 key.offset = search_start;
1309 key.type = BTRFS_DEV_EXTENT_KEY;
1310
1311 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1312 if (ret < 0)
1313 goto out;
1314 if (ret > 0) {
1315 ret = btrfs_previous_item(root, path, key.objectid, key.type);
1316 if (ret < 0)
1317 goto out;
1318 }
1319
1320 while (1) {
1321 l = path->nodes[0];
1322 slot = path->slots[0];
1323 if (slot >= btrfs_header_nritems(l)) {
1324 ret = btrfs_next_leaf(root, path);
1325 if (ret == 0)
1326 continue;
1327 if (ret < 0)
1328 goto out;
1329
1330 break;
1331 }
1332 btrfs_item_key_to_cpu(l, &key, slot);
1333
1334 if (key.objectid < device->devid)
1335 goto next;
1336
1337 if (key.objectid > device->devid)
1338 break;
1339
1340 if (key.type != BTRFS_DEV_EXTENT_KEY)
1341 goto next;
1342
1343 if (key.offset > search_start) {
1344 hole_size = key.offset - search_start;
1345
1346 /*
1347 * Have to check before we set max_hole_start, otherwise
1348 * we could end up sending back this offset anyway.
1349 */
1350 if (contains_pending_extent(transaction, device,
1351 &search_start,
1352 hole_size)) {
1353 if (key.offset >= search_start) {
1354 hole_size = key.offset - search_start;
1355 } else {
1356 WARN_ON_ONCE(1);
1357 hole_size = 0;
1358 }
1359 }
1360
1361 if (hole_size > max_hole_size) {
1362 max_hole_start = search_start;
1363 max_hole_size = hole_size;
1364 }
1365
1366 /*
1367 * If this free space is greater than what we need,
1368 * it must be the max free space that we have found
1369 * until now, so max_hole_start must point to the start
1370 * of this free space and the length of this free space
1371 * is stored in max_hole_size. Thus, we return
1372 * max_hole_start and max_hole_size and go back to the
1373 * caller.
1374 */
1375 if (hole_size >= num_bytes) {
1376 ret = 0;
1377 goto out;
1378 }
1379 }
1380
1381 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1382 extent_end = key.offset + btrfs_dev_extent_length(l,
1383 dev_extent);
1384 if (extent_end > search_start)
1385 search_start = extent_end;
1386next:
1387 path->slots[0]++;
1388 cond_resched();
1389 }
1390
1391 /*
1392 * At this point, search_start should be the end of
1393 * allocated dev extents, and when shrinking the device,
1394 * search_end may be smaller than search_start.
1395 */
1396 if (search_end > search_start) {
1397 hole_size = search_end - search_start;
1398
1399 if (contains_pending_extent(transaction, device, &search_start,
1400 hole_size)) {
1401 btrfs_release_path(path);
1402 goto again;
1403 }
1404
1405 if (hole_size > max_hole_size) {
1406 max_hole_start = search_start;
1407 max_hole_size = hole_size;
1408 }
1409 }
1410
1411 /* See above. */
1412 if (max_hole_size < num_bytes)
1413 ret = -ENOSPC;
1414 else
1415 ret = 0;
1416
1417out:
1418 btrfs_free_path(path);
1419 *start = max_hole_start;
1420 if (len)
1421 *len = max_hole_size;
1422 return ret;
1423}
1424
1425int find_free_dev_extent(struct btrfs_trans_handle *trans,
1426 struct btrfs_device *device, u64 num_bytes,
1427 u64 *start, u64 *len)
1428{
1429 /* FIXME use last free of some kind */
1430 return find_free_dev_extent_start(trans->transaction, device,
1431 num_bytes, 0, start, len);
1432}
1433
1434static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1435 struct btrfs_device *device,
1436 u64 start, u64 *dev_extent_len)
1437{
1438 int ret;
1439 struct btrfs_path *path;
1440 struct btrfs_root *root = device->dev_root;
1441 struct btrfs_key key;
1442 struct btrfs_key found_key;
1443 struct extent_buffer *leaf = NULL;
1444 struct btrfs_dev_extent *extent = NULL;
1445
1446 path = btrfs_alloc_path();
1447 if (!path)
1448 return -ENOMEM;
1449
1450 key.objectid = device->devid;
1451 key.offset = start;
1452 key.type = BTRFS_DEV_EXTENT_KEY;
1453again:
1454 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1455 if (ret > 0) {
1456 ret = btrfs_previous_item(root, path, key.objectid,
1457 BTRFS_DEV_EXTENT_KEY);
1458 if (ret)
1459 goto out;
1460 leaf = path->nodes[0];
1461 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1462 extent = btrfs_item_ptr(leaf, path->slots[0],
1463 struct btrfs_dev_extent);
1464 BUG_ON(found_key.offset > start || found_key.offset +
1465 btrfs_dev_extent_length(leaf, extent) < start);
1466 key = found_key;
1467 btrfs_release_path(path);
1468 goto again;
1469 } else if (ret == 0) {
1470 leaf = path->nodes[0];
1471 extent = btrfs_item_ptr(leaf, path->slots[0],
1472 struct btrfs_dev_extent);
1473 } else {
1474 btrfs_std_error(root->fs_info, ret, "Slot search failed");
1475 goto out;
1476 }
1477
1478 *dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1479
1480 ret = btrfs_del_item(trans, root, path);
1481 if (ret) {
1482 btrfs_std_error(root->fs_info, ret,
1483 "Failed to remove dev extent item");
1484 } else {
1485 set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1486 }
1487out:
1488 btrfs_free_path(path);
1489 return ret;
1490}
1491
1492static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1493 struct btrfs_device *device,
1494 u64 chunk_tree, u64 chunk_objectid,
1495 u64 chunk_offset, u64 start, u64 num_bytes)
1496{
1497 int ret;
1498 struct btrfs_path *path;
1499 struct btrfs_root *root = device->dev_root;
1500 struct btrfs_dev_extent *extent;
1501 struct extent_buffer *leaf;
1502 struct btrfs_key key;
1503
1504 WARN_ON(!device->in_fs_metadata);
1505 WARN_ON(device->is_tgtdev_for_dev_replace);
1506 path = btrfs_alloc_path();
1507 if (!path)
1508 return -ENOMEM;
1509
1510 key.objectid = device->devid;
1511 key.offset = start;
1512 key.type = BTRFS_DEV_EXTENT_KEY;
1513 ret = btrfs_insert_empty_item(trans, root, path, &key,
1514 sizeof(*extent));
1515 if (ret)
1516 goto out;
1517
1518 leaf = path->nodes[0];
1519 extent = btrfs_item_ptr(leaf, path->slots[0],
1520 struct btrfs_dev_extent);
1521 btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1522 btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1523 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1524
1525 write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1526 btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);
1527
1528 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1529 btrfs_mark_buffer_dirty(leaf);
1530out:
1531 btrfs_free_path(path);
1532 return ret;
1533}
1534
1535static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1536{
1537 struct extent_map_tree *em_tree;
1538 struct extent_map *em;
1539 struct rb_node *n;
1540 u64 ret = 0;
1541
1542 em_tree = &fs_info->mapping_tree.map_tree;
1543 read_lock(&em_tree->lock);
1544 n = rb_last(&em_tree->map);
1545 if (n) {
1546 em = rb_entry(n, struct extent_map, rb_node);
1547 ret = em->start + em->len;
1548 }
1549 read_unlock(&em_tree->lock);
1550
1551 return ret;
1552}
1553
1554static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1555 u64 *devid_ret)
1556{
1557 int ret;
1558 struct btrfs_key key;
1559 struct btrfs_key found_key;
1560 struct btrfs_path *path;
1561
1562 path = btrfs_alloc_path();
1563 if (!path)
1564 return -ENOMEM;
1565
1566 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1567 key.type = BTRFS_DEV_ITEM_KEY;
1568 key.offset = (u64)-1;
1569
1570 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1571 if (ret < 0)
1572 goto error;
1573
1574 BUG_ON(ret == 0); /* Corruption */
1575
1576 ret = btrfs_previous_item(fs_info->chunk_root, path,
1577 BTRFS_DEV_ITEMS_OBJECTID,
1578 BTRFS_DEV_ITEM_KEY);
1579 if (ret) {
1580 *devid_ret = 1;
1581 } else {
1582 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1583 path->slots[0]);
1584 *devid_ret = found_key.offset + 1;
1585 }
1586 ret = 0;
1587error:
1588 btrfs_free_path(path);
1589 return ret;
1590}
1591
1592/*
1593 * the device information is stored in the chunk root;
1594 * the btrfs_device struct should be fully filled in
1595 */
1596static int btrfs_add_device(struct btrfs_trans_handle *trans,
1597 struct btrfs_root *root,
1598 struct btrfs_device *device)
1599{
1600 int ret;
1601 struct btrfs_path *path;
1602 struct btrfs_dev_item *dev_item;
1603 struct extent_buffer *leaf;
1604 struct btrfs_key key;
1605 unsigned long ptr;
1606
1607 root = root->fs_info->chunk_root;
1608
1609 path = btrfs_alloc_path();
1610 if (!path)
1611 return -ENOMEM;
1612
1613 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1614 key.type = BTRFS_DEV_ITEM_KEY;
1615 key.offset = device->devid;
1616
1617 ret = btrfs_insert_empty_item(trans, root, path, &key,
1618 sizeof(*dev_item));
1619 if (ret)
1620 goto out;
1621
1622 leaf = path->nodes[0];
1623 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1624
1625 btrfs_set_device_id(leaf, dev_item, device->devid);
1626 btrfs_set_device_generation(leaf, dev_item, 0);
1627 btrfs_set_device_type(leaf, dev_item, device->type);
1628 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1629 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1630 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1631 btrfs_set_device_total_bytes(leaf, dev_item,
1632 btrfs_device_get_disk_total_bytes(device));
1633 btrfs_set_device_bytes_used(leaf, dev_item,
1634 btrfs_device_get_bytes_used(device));
1635 btrfs_set_device_group(leaf, dev_item, 0);
1636 btrfs_set_device_seek_speed(leaf, dev_item, 0);
1637 btrfs_set_device_bandwidth(leaf, dev_item, 0);
1638 btrfs_set_device_start_offset(leaf, dev_item, 0);
1639
1640 ptr = btrfs_device_uuid(dev_item);
1641 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1642 ptr = btrfs_device_fsid(dev_item);
1643 write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1644 btrfs_mark_buffer_dirty(leaf);
1645
1646 ret = 0;
1647out:
1648 btrfs_free_path(path);
1649 return ret;
1650}
1651
1652/*
1653 * Function to update ctime/mtime for a given device path.
1654 * Mainly used for ctime/mtime-based probes like libblkid.
1655 */
1656static void update_dev_time(char *path_name)
1657{
1658 struct file *filp;
1659
1660 filp = filp_open(path_name, O_RDWR, 0);
1661 if (IS_ERR(filp))
1662 return;
1663 file_update_time(filp);
1664 filp_close(filp, NULL);
1665}
1666
1667static int btrfs_rm_dev_item(struct btrfs_root *root,
1668 struct btrfs_device *device)
1669{
1670 int ret;
1671 struct btrfs_path *path;
1672 struct btrfs_key key;
1673 struct btrfs_trans_handle *trans;
1674
1675 root = root->fs_info->chunk_root;
1676
1677 path = btrfs_alloc_path();
1678 if (!path)
1679 return -ENOMEM;
1680
1681 trans = btrfs_start_transaction(root, 0);
1682 if (IS_ERR(trans)) {
1683 btrfs_free_path(path);
1684 return PTR_ERR(trans);
1685 }
1686 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1687 key.type = BTRFS_DEV_ITEM_KEY;
1688 key.offset = device->devid;
1689
1690 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1691 if (ret < 0)
1692 goto out;
1693
1694 if (ret > 0) {
1695 ret = -ENOENT;
1696 goto out;
1697 }
1698
1699 ret = btrfs_del_item(trans, root, path);
1700 if (ret)
1701 goto out;
1702out:
1703 btrfs_free_path(path);
1704 btrfs_commit_transaction(trans, root);
1705 return ret;
1706}
1707
1708static int __check_raid_min_devices(struct btrfs_fs_info *fs_info)
1709{
1710 u64 all_avail;
1711 u64 num_devices;
1712 unsigned seq;
1713
1714 num_devices = fs_info->fs_devices->num_devices;
1715 btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
1716 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
1717 WARN_ON(num_devices < 1);
1718 num_devices--;
1719 }
1720 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
1721
1722 do {
1723 seq = read_seqbegin(&fs_info->profiles_lock);
1724
1725 all_avail = fs_info->avail_data_alloc_bits |
1726 fs_info->avail_system_alloc_bits |
1727 fs_info->avail_metadata_alloc_bits;
1728 } while (read_seqretry(&fs_info->profiles_lock, seq));
1729
1730 if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
1731 return BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET;
1732 }
1733
1734 if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
1735 return BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET;
1736 }
1737
1738 if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
1739 fs_info->fs_devices->rw_devices <= 2) {
1740 return BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET;
1741 }
1742
1743 if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
1744 fs_info->fs_devices->rw_devices <= 3) {
1745 return BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET;
1746 }
1747
1748 return 0;
1749}
1750
1751int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1752{
1753 struct btrfs_device *device;
1754 struct btrfs_device *next_device;
1755 struct block_device *bdev;
1756 struct buffer_head *bh = NULL;
1757 struct btrfs_super_block *disk_super;
1758 struct btrfs_fs_devices *cur_devices;
1759 u64 devid;
1760 u64 num_devices;
1761 u8 *dev_uuid;
1762 int ret = 0;
1763 bool clear_super = false;
1764
1765 mutex_lock(&uuid_mutex);
1766
1767 ret = __check_raid_min_devices(root->fs_info);
1768 if (ret)
1769 goto out;
1770
1771 if (strcmp(device_path, "missing") == 0) {
1772 struct list_head *devices;
1773 struct btrfs_device *tmp;
1774
1775 device = NULL;
1776 devices = &root->fs_info->fs_devices->devices;
1777 /*
1778 * It is safe to read the devices since the volume_mutex
1779 * is held.
1780 */
1781 list_for_each_entry(tmp, devices, dev_list) {
1782 if (tmp->in_fs_metadata &&
1783 !tmp->is_tgtdev_for_dev_replace &&
1784 !tmp->bdev) {
1785 device = tmp;
1786 break;
1787 }
1788 }
1789 bdev = NULL;
1790 bh = NULL;
1791 disk_super = NULL;
1792 if (!device) {
1793 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
1794 goto out;
1795 }
1796 } else {
1797 ret = btrfs_get_bdev_and_sb(device_path,
1798 FMODE_WRITE | FMODE_EXCL,
1799 root->fs_info->bdev_holder, 0,
1800 &bdev, &bh);
1801 if (ret)
1802 goto out;
1803 disk_super = (struct btrfs_super_block *)bh->b_data;
1804 devid = btrfs_stack_device_id(&disk_super->dev_item);
1805 dev_uuid = disk_super->dev_item.uuid;
1806 device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1807 disk_super->fsid);
1808 if (!device) {
1809 ret = -ENOENT;
1810 goto error_brelse;
1811 }
1812 }
1813
1814 if (device->is_tgtdev_for_dev_replace) {
1815 ret = BTRFS_ERROR_DEV_TGT_REPLACE;
1816 goto error_brelse;
1817 }
1818
1819 if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1820 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
1821 goto error_brelse;
1822 }
1823
1824 if (device->writeable) {
1825 lock_chunks(root);
1826 list_del_init(&device->dev_alloc_list);
1827 device->fs_devices->rw_devices--;
1828 unlock_chunks(root);
1829 clear_super = true;
1830 }
1831
1832 mutex_unlock(&uuid_mutex);
1833 ret = btrfs_shrink_device(device, 0);
1834 mutex_lock(&uuid_mutex);
1835 if (ret)
1836 goto error_undo;
1837
1838 /*
1839 * TODO: the superblock still includes this device in its num_devices
1840 * counter although write_all_supers() is not locked out. This
1841 * could give a filesystem state which requires a degraded mount.
1842 */
1843 ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1844 if (ret)
1845 goto error_undo;
1846
1847 device->in_fs_metadata = 0;
1848 btrfs_scrub_cancel_dev(root->fs_info, device);
1849
1850 /*
1851 * the device list mutex makes sure that we don't change
1852 * the device list while someone else is writing out all
1853 * the device supers. Whoever is writing all supers, should
1854 * lock the device list mutex before getting the number of
1855 * devices in the super block (super_copy). Conversely,
1856 * whoever updates the number of devices in the super block
1857 * (super_copy) should hold the device list mutex.
1858 */
1859
1860 cur_devices = device->fs_devices;
1861 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1862 list_del_rcu(&device->dev_list);
1863
1864 device->fs_devices->num_devices--;
1865 device->fs_devices->total_devices--;
1866
1867 if (device->missing)
1868 device->fs_devices->missing_devices--;
1869
1870 next_device = list_entry(root->fs_info->fs_devices->devices.next,
1871 struct btrfs_device, dev_list);
1872 if (device->bdev == root->fs_info->sb->s_bdev)
1873 root->fs_info->sb->s_bdev = next_device->bdev;
1874 if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1875 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1876
1877 if (device->bdev) {
1878 device->fs_devices->open_devices--;
1879 /* remove sysfs entry */
1880 btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
1881 }
1882
1883 call_rcu(&device->rcu, free_device);
1884
1885 num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1886 btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1887 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1888
1889 if (cur_devices->open_devices == 0) {
1890 struct btrfs_fs_devices *fs_devices;
1891 fs_devices = root->fs_info->fs_devices;
1892 while (fs_devices) {
1893 if (fs_devices->seed == cur_devices) {
1894 fs_devices->seed = cur_devices->seed;
1895 break;
1896 }
1897 fs_devices = fs_devices->seed;
1898 }
1899 cur_devices->seed = NULL;
1900 __btrfs_close_devices(cur_devices);
1901 free_fs_devices(cur_devices);
1902 }
1903
1904 root->fs_info->num_tolerated_disk_barrier_failures =
1905 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1906
1907 /*
1908 * at this point, the device is zero sized. We want to
1909 * remove it from the devices list and zero out the old super
1910 */
1911 if (clear_super && disk_super) {
1912 u64 bytenr;
1913 int i;
1914
1915 /* make sure this device isn't detected as part of
1916 * the FS anymore
1917 */
1918 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1919 set_buffer_dirty(bh);
1920 sync_dirty_buffer(bh);
1921
1922 /* clear the mirror copies of the super block on the disk
1923 * being removed; the 0th copy was taken care of above and
1924 * the code below takes care of the rest
1925 */
1926 for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1927 bytenr = btrfs_sb_offset(i);
1928 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
1929 i_size_read(bdev->bd_inode))
1930 break;
1931
1932 brelse(bh);
1933 bh = __bread(bdev, bytenr / 4096,
1934 BTRFS_SUPER_INFO_SIZE);
1935 if (!bh)
1936 continue;
1937
1938 disk_super = (struct btrfs_super_block *)bh->b_data;
1939
1940 if (btrfs_super_bytenr(disk_super) != bytenr ||
1941 btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
1942 continue;
1943 }
1944 memset(&disk_super->magic, 0,
1945 sizeof(disk_super->magic));
1946 set_buffer_dirty(bh);
1947 sync_dirty_buffer(bh);
1948 }
1949 }
1950
1951 ret = 0;
1952
1953 if (bdev) {
1954 /* Notify udev that device has changed */
1955 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1956
1957 /* Update ctime/mtime for device path for libblkid */
1958 update_dev_time(device_path);
1959 }
1960
1961error_brelse:
1962 brelse(bh);
1963 if (bdev)
1964 blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1965out:
1966 mutex_unlock(&uuid_mutex);
1967 return ret;
1968error_undo:
1969 if (device->writeable) {
1970 lock_chunks(root);
1971 list_add(&device->dev_alloc_list,
1972 &root->fs_info->fs_devices->alloc_list);
1973 device->fs_devices->rw_devices++;
1974 unlock_chunks(root);
1975 }
1976 goto error_brelse;
1977}
1978
1979void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
1980 struct btrfs_device *srcdev)
1981{
1982 struct btrfs_fs_devices *fs_devices;
1983
1984 WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1985
1986 /*
1987 * in case of an fs with no seed, srcdev->fs_devices will point
1988 * to the fs_devices of fs_info. However, when the dev being replaced
1989 * is a seed dev, it will point to the seed's local fs_devices. In
1990 * short, srcdev will have its correct fs_devices in both cases.
1991 */
1992 fs_devices = srcdev->fs_devices;
1993
1994 list_del_rcu(&srcdev->dev_list);
1995 list_del_rcu(&srcdev->dev_alloc_list);
1996 fs_devices->num_devices--;
1997 if (srcdev->missing)
1998 fs_devices->missing_devices--;
1999
2000 if (srcdev->writeable) {
2001 fs_devices->rw_devices--;
2002 /* zero out the old super if it is writable */
2003 btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
2004 }
2005
2006 if (srcdev->bdev)
2007 fs_devices->open_devices--;
2008}
2009
2010void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
2011 struct btrfs_device *srcdev)
2012{
2013 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2014
2015 call_rcu(&srcdev->rcu, free_device);
2016
2017 /*
2018 * unless fs_devices is a seed fs, num_devices shouldn't go
2019 * to zero
2020 */
2021 BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);
2022
2023 /* if there are no devs left we'd rather delete the fs_devices */
2024 if (!fs_devices->num_devices) {
2025 struct btrfs_fs_devices *tmp_fs_devices;
2026
2027 tmp_fs_devices = fs_info->fs_devices;
2028 while (tmp_fs_devices) {
2029 if (tmp_fs_devices->seed == fs_devices) {
2030 tmp_fs_devices->seed = fs_devices->seed;
2031 break;
2032 }
2033 tmp_fs_devices = tmp_fs_devices->seed;
2034 }
2035 fs_devices->seed = NULL;
2036 __btrfs_close_devices(fs_devices);
2037 free_fs_devices(fs_devices);
2038 }
2039}
2040
2041void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
2042 struct btrfs_device *tgtdev)
2043{
2044 struct btrfs_device *next_device;
2045
2046 mutex_lock(&uuid_mutex);
2047 WARN_ON(!tgtdev);
2048 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2049
2050 btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev);
2051
2052 if (tgtdev->bdev) {
2053 btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
2054 fs_info->fs_devices->open_devices--;
2055 }
2056 fs_info->fs_devices->num_devices--;
2057
2058 next_device = list_entry(fs_info->fs_devices->devices.next,
2059 struct btrfs_device, dev_list);
2060 if (tgtdev->bdev == fs_info->sb->s_bdev)
2061 fs_info->sb->s_bdev = next_device->bdev;
2062 if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
2063 fs_info->fs_devices->latest_bdev = next_device->bdev;
2064 list_del_rcu(&tgtdev->dev_list);
2065
2066 call_rcu(&tgtdev->rcu, free_device);
2067
2068 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2069 mutex_unlock(&uuid_mutex);
2070}
2071
2072static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
2073 struct btrfs_device **device)
2074{
2075 int ret = 0;
2076 struct btrfs_super_block *disk_super;
2077 u64 devid;
2078 u8 *dev_uuid;
2079 struct block_device *bdev;
2080 struct buffer_head *bh;
2081
2082 *device = NULL;
2083 ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2084 root->fs_info->bdev_holder, 0, &bdev, &bh);
2085 if (ret)
2086 return ret;
2087 disk_super = (struct btrfs_super_block *)bh->b_data;
2088 devid = btrfs_stack_device_id(&disk_super->dev_item);
2089 dev_uuid = disk_super->dev_item.uuid;
2090 *device = btrfs_find_device(root->fs_info, devid, dev_uuid,
2091 disk_super->fsid);
2092 brelse(bh);
2093 if (!*device)
2094 ret = -ENOENT;
2095 blkdev_put(bdev, FMODE_READ);
2096 return ret;
2097}
2098
2099int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
2100 char *device_path,
2101 struct btrfs_device **device)
2102{
2103 *device = NULL;
2104 if (strcmp(device_path, "missing") == 0) {
2105 struct list_head *devices;
2106 struct btrfs_device *tmp;
2107
2108 devices = &root->fs_info->fs_devices->devices;
2109 /*
2110 * It is safe to read the devices since the volume_mutex
2111 * is held by the caller.
2112 */
2113 list_for_each_entry(tmp, devices, dev_list) {
2114 if (tmp->in_fs_metadata && !tmp->bdev) {
2115 *device = tmp;
2116 break;
2117 }
2118 }
2119
2120 if (!*device)
2121 return BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2122
2123 return 0;
2124 } else {
2125 return btrfs_find_device_by_path(root, device_path, device);
2126 }
2127}
2128
2129int btrfs_find_device_by_user_input(struct btrfs_root *root, u64 srcdevid,
2130 char *srcdev_name,
2131 struct btrfs_device **device)
2132{
2133 int ret;
2134
2135 if (srcdevid) {
2136 ret = 0;
2137 *device = btrfs_find_device(root->fs_info, srcdevid, NULL,
2138 NULL);
2139 if (!*device)
2140 ret = -ENOENT;
2141 } else {
2142 ret = btrfs_find_device_missing_or_by_path(root, srcdev_name,
2143 device);
2144 }
2145 return ret;
2146}
2147
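/*
 * Sketch of how a caller (e.g. the dev replace start path) might use
 * the new helper above; the args names here are illustrative only:
 *
 *	struct btrfs_device *src_device;
 *	int ret;
 *
 *	ret = btrfs_find_device_by_user_input(root, args->srcdevid,
 *					      args->srcdev_name,
 *					      &src_device);
 *	if (ret)
 *		return ret;
 *
 * A non-zero srcdevid wins and is looked up directly; the name (or
 * the literal string "missing") is only consulted when srcdevid is 0,
 * in which case the error may also be
 * BTRFS_ERROR_DEV_MISSING_NOT_FOUND.
 */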
2148/*
2149 * Does all the dirty work required for changing the file system's UUID.
2150 */
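/*
 * In outline (a summary of the code below, not a spec): clone the
 * current fs_devices and park the clone on fs_uuids as the permanent
 * record of the seed; splice the live devices onto a fresh
 * seed_devices struct; empty the mounted fs_devices, chain it to
 * seed_devices via ->seed, generate a new random fsid for it, and
 * clear BTRFS_SUPER_FLAG_SEEDING so the sprouted fs becomes writable.
 */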
2151static int btrfs_prepare_sprout(struct btrfs_root *root)
2152{
2153 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2154 struct btrfs_fs_devices *old_devices;
2155 struct btrfs_fs_devices *seed_devices;
2156 struct btrfs_super_block *disk_super = root->fs_info->super_copy;
2157 struct btrfs_device *device;
2158 u64 super_flags;
2159
2160 BUG_ON(!mutex_is_locked(&uuid_mutex));
2161 if (!fs_devices->seeding)
2162 return -EINVAL;
2163
2164 seed_devices = __alloc_fs_devices();
2165 if (IS_ERR(seed_devices))
2166 return PTR_ERR(seed_devices);
2167
2168 old_devices = clone_fs_devices(fs_devices);
2169 if (IS_ERR(old_devices)) {
2170 kfree(seed_devices);
2171 return PTR_ERR(old_devices);
2172 }
2173
2174 list_add(&old_devices->list, &fs_uuids);
2175
2176 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2177 seed_devices->opened = 1;
2178 INIT_LIST_HEAD(&seed_devices->devices);
2179 INIT_LIST_HEAD(&seed_devices->alloc_list);
2180 mutex_init(&seed_devices->device_list_mutex);
2181
2182 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2183 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2184 synchronize_rcu);
2185 list_for_each_entry(device, &seed_devices->devices, dev_list)
2186 device->fs_devices = seed_devices;
2187
2188 lock_chunks(root);
2189 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
2190 unlock_chunks(root);
2191
2192 fs_devices->seeding = 0;
2193 fs_devices->num_devices = 0;
2194 fs_devices->open_devices = 0;
2195 fs_devices->missing_devices = 0;
2196 fs_devices->rotating = 0;
2197 fs_devices->seed = seed_devices;
2198
2199 generate_random_uuid(fs_devices->fsid);
2200 memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2201 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2202 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2203
2204 super_flags = btrfs_super_flags(disk_super) &
2205 ~BTRFS_SUPER_FLAG_SEEDING;
2206 btrfs_set_super_flags(disk_super, super_flags);
2207
2208 return 0;
2209}
2210
2211/*
2212 * Store the expected generation for seed devices in device items.
2213 */
2214static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
2215 struct btrfs_root *root)
2216{
2217 struct btrfs_path *path;
2218 struct extent_buffer *leaf;
2219 struct btrfs_dev_item *dev_item;
2220 struct btrfs_device *device;
2221 struct btrfs_key key;
2222 u8 fs_uuid[BTRFS_UUID_SIZE];
2223 u8 dev_uuid[BTRFS_UUID_SIZE];
2224 u64 devid;
2225 int ret;
2226
2227 path = btrfs_alloc_path();
2228 if (!path)
2229 return -ENOMEM;
2230
2231 root = root->fs_info->chunk_root;
2232 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2233 key.offset = 0;
2234 key.type = BTRFS_DEV_ITEM_KEY;
2235
2236 while (1) {
2237 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2238 if (ret < 0)
2239 goto error;
2240
2241 leaf = path->nodes[0];
2242next_slot:
2243 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2244 ret = btrfs_next_leaf(root, path);
2245 if (ret > 0)
2246 break;
2247 if (ret < 0)
2248 goto error;
2249 leaf = path->nodes[0];
2250 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2251 btrfs_release_path(path);
2252 continue;
2253 }
2254
2255 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2256 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2257 key.type != BTRFS_DEV_ITEM_KEY)
2258 break;
2259
2260 dev_item = btrfs_item_ptr(leaf, path->slots[0],
2261 struct btrfs_dev_item);
2262 devid = btrfs_device_id(leaf, dev_item);
2263 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2264 BTRFS_UUID_SIZE);
2265 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2266 BTRFS_UUID_SIZE);
2267 device = btrfs_find_device(root->fs_info, devid, dev_uuid,
2268 fs_uuid);
2269 BUG_ON(!device); /* Logic error */
2270
2271 if (device->fs_devices->seeding) {
2272 btrfs_set_device_generation(leaf, dev_item,
2273 device->generation);
2274 btrfs_mark_buffer_dirty(leaf);
2275 }
2276
2277 path->slots[0]++;
2278 goto next_slot;
2279 }
2280 ret = 0;
2281error:
2282 btrfs_free_path(path);
2283 return ret;
2284}
2285
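/*
 * Rough shape of the device-add path below: open the block device
 * exclusively, reject duplicates, allocate and fill a btrfs_device,
 * publish it on the fs_devices lists and in sysfs under
 * device_list_mutex plus the chunk lock, bump the superblock totals,
 * insert its dev item into the chunk tree and, for a seed fs, sprout
 * a new fsid first and relocate the system chunks once the
 * transaction commits.
 */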
2286int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
2287{
2288 struct request_queue *q;
2289 struct btrfs_trans_handle *trans;
2290 struct btrfs_device *device;
2291 struct block_device *bdev;
2292 struct list_head *devices;
2293 struct super_block *sb = root->fs_info->sb;
2294 struct rcu_string *name;
2295 u64 tmp;
2296 int seeding_dev = 0;
2297 int ret = 0;
2298
2299 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
2300 return -EROFS;
2301
2302 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2303 root->fs_info->bdev_holder);
2304 if (IS_ERR(bdev))
2305 return PTR_ERR(bdev);
2306
2307 if (root->fs_info->fs_devices->seeding) {
2308 seeding_dev = 1;
2309 down_write(&sb->s_umount);
2310 mutex_lock(&uuid_mutex);
2311 }
2312
2313 filemap_write_and_wait(bdev->bd_inode->i_mapping);
2314
2315 devices = &root->fs_info->fs_devices->devices;
2316
2317 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2318 list_for_each_entry(device, devices, dev_list) {
2319 if (device->bdev == bdev) {
2320 ret = -EEXIST;
2321 mutex_unlock(
2322 &root->fs_info->fs_devices->device_list_mutex);
2323 goto error;
2324 }
2325 }
2326 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2327
2328 device = btrfs_alloc_device(root->fs_info, NULL, NULL);
2329 if (IS_ERR(device)) {
2330 /* we can safely leave the fs_devices entry around */
2331 ret = PTR_ERR(device);
2332 goto error;
2333 }
2334
2335 name = rcu_string_strdup(device_path, GFP_KERNEL);
2336 if (!name) {
2337 kfree(device);
2338 ret = -ENOMEM;
2339 goto error;
2340 }
2341 rcu_assign_pointer(device->name, name);
2342
2343 trans = btrfs_start_transaction(root, 0);
2344 if (IS_ERR(trans)) {
2345 rcu_string_free(device->name);
2346 kfree(device);
2347 ret = PTR_ERR(trans);
2348 goto error;
2349 }
2350
2351 q = bdev_get_queue(bdev);
2352 if (blk_queue_discard(q))
2353 device->can_discard = 1;
2354 device->writeable = 1;
2355 device->generation = trans->transid;
2356 device->io_width = root->sectorsize;
2357 device->io_align = root->sectorsize;
2358 device->sector_size = root->sectorsize;
2359 device->total_bytes = i_size_read(bdev->bd_inode);
2360 device->disk_total_bytes = device->total_bytes;
2361 device->commit_total_bytes = device->total_bytes;
2362 device->dev_root = root->fs_info->dev_root;
2363 device->bdev = bdev;
2364 device->in_fs_metadata = 1;
2365 device->is_tgtdev_for_dev_replace = 0;
2366 device->mode = FMODE_EXCL;
2367 device->dev_stats_valid = 1;
2368 set_blocksize(device->bdev, 4096);
2369
2370 if (seeding_dev) {
2371 sb->s_flags &= ~MS_RDONLY;
2372 ret = btrfs_prepare_sprout(root);
2373 BUG_ON(ret); /* -ENOMEM */
2374 }
2375
2376 device->fs_devices = root->fs_info->fs_devices;
2377
2378 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2379 lock_chunks(root);
2380 list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
2381 list_add(&device->dev_alloc_list,
2382 &root->fs_info->fs_devices->alloc_list);
2383 root->fs_info->fs_devices->num_devices++;
2384 root->fs_info->fs_devices->open_devices++;
2385 root->fs_info->fs_devices->rw_devices++;
2386 root->fs_info->fs_devices->total_devices++;
2387 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2388
2389 spin_lock(&root->fs_info->free_chunk_lock);
2390 root->fs_info->free_chunk_space += device->total_bytes;
2391 spin_unlock(&root->fs_info->free_chunk_lock);
2392
2393 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2394 root->fs_info->fs_devices->rotating = 1;
2395
2396 tmp = btrfs_super_total_bytes(root->fs_info->super_copy);
2397 btrfs_set_super_total_bytes(root->fs_info->super_copy,
2398 tmp + device->total_bytes);
2399
2400 tmp = btrfs_super_num_devices(root->fs_info->super_copy);
2401 btrfs_set_super_num_devices(root->fs_info->super_copy,
2402 tmp + 1);
2403
2404 /* add sysfs device entry */
2405 btrfs_sysfs_add_device_link(root->fs_info->fs_devices, device);
2406
2407 /*
2408 * we've got more storage, clear any full flags on the space
2409 * infos
2410 */
2411 btrfs_clear_space_info_full(root->fs_info);
2412
2413 unlock_chunks(root);
2414 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2415
2416 if (seeding_dev) {
2417 lock_chunks(root);
2418 ret = init_first_rw_device(trans, root, device);
2419 unlock_chunks(root);
2420 if (ret) {
2421 btrfs_abort_transaction(trans, root, ret);
2422 goto error_trans;
2423 }
2424 }
2425
2426 ret = btrfs_add_device(trans, root, device);
2427 if (ret) {
2428 btrfs_abort_transaction(trans, root, ret);
2429 goto error_trans;
2430 }
2431
2432 if (seeding_dev) {
2433 char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
2434
2435 ret = btrfs_finish_sprout(trans, root);
2436 if (ret) {
2437 btrfs_abort_transaction(trans, root, ret);
2438 goto error_trans;
2439 }
2440
2441 /* Sprouting would change the fsid of the mounted root,
2442 * so rename the fsid directory in sysfs
2443 */
2444 snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
2445 root->fs_info->fsid);
2446 if (kobject_rename(&root->fs_info->fs_devices->fsid_kobj,
2447 fsid_buf))
2448 btrfs_warn(root->fs_info,
2449 "sysfs: failed to create fsid for sprout");
2450 }
2451
2452 root->fs_info->num_tolerated_disk_barrier_failures =
2453 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2454 ret = btrfs_commit_transaction(trans, root);
2455
2456 if (seeding_dev) {
2457 mutex_unlock(&uuid_mutex);
2458 up_write(&sb->s_umount);
2459
2460 if (ret) /* transaction commit */
2461 return ret;
2462
2463 ret = btrfs_relocate_sys_chunks(root);
2464 if (ret < 0)
2465 btrfs_std_error(root->fs_info, ret,
2466 "Failed to relocate sys chunks after "
2467 "device initialization. This can be fixed "
2468 "using the \"btrfs balance\" command.");
2469 trans = btrfs_attach_transaction(root);
2470 if (IS_ERR(trans)) {
2471 if (PTR_ERR(trans) == -ENOENT)
2472 return 0;
2473 return PTR_ERR(trans);
2474 }
2475 ret = btrfs_commit_transaction(trans, root);
2476 }
2477
2478 /* Update ctime/mtime for libblkid */
2479 update_dev_time(device_path);
2480 return ret;
2481
2482error_trans:
2483 btrfs_end_transaction(trans, root);
2484 rcu_string_free(device->name);
2485 btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
2486 kfree(device);
2487error:
2488 blkdev_put(bdev, FMODE_EXCL);
2489 if (seeding_dev) {
2490 mutex_unlock(&uuid_mutex);
2491 up_write(&sb->s_umount);
2492 }
2493 return ret;
2494}
2495
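/*
 * Setting up a replace target below differs from a plain device add
 * in a few ways: the device uses the reserved BTRFS_DEV_REPLACE_DEVID
 * instead of getting its own dev item, its size fields are copied
 * from the source device (the target must be at least as large), and
 * it is deliberately never put on the alloc_list so the allocator
 * cannot place new chunks on it while the copy is in progress.
 */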
2496int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2497 struct btrfs_device *srcdev,
2498 struct btrfs_device **device_out)
2499{
2500 struct request_queue *q;
2501 struct btrfs_device *device;
2502 struct block_device *bdev;
2503 struct btrfs_fs_info *fs_info = root->fs_info;
2504 struct list_head *devices;
2505 struct rcu_string *name;
2506 u64 devid = BTRFS_DEV_REPLACE_DEVID;
2507 int ret = 0;
2508
2509 *device_out = NULL;
2510 if (fs_info->fs_devices->seeding) {
2511 btrfs_err(fs_info, "the filesystem is a seed filesystem!");
2512 return -EINVAL;
2513 }
2514
2515 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2516 fs_info->bdev_holder);
2517 if (IS_ERR(bdev)) {
2518 btrfs_err(fs_info, "target device %s is invalid!", device_path);
2519 return PTR_ERR(bdev);
2520 }
2521
2522 filemap_write_and_wait(bdev->bd_inode->i_mapping);
2523
2524 devices = &fs_info->fs_devices->devices;
2525 list_for_each_entry(device, devices, dev_list) {
2526 if (device->bdev == bdev) {
2527 btrfs_err(fs_info, "target device is in the filesystem!");
2528 ret = -EEXIST;
2529 goto error;
2530 }
2531 }
2532
2534 if (i_size_read(bdev->bd_inode) <
2535 btrfs_device_get_total_bytes(srcdev)) {
2536 btrfs_err(fs_info, "target device is smaller than source device!");
2537 ret = -EINVAL;
2538 goto error;
2539 }
2540
2542 device = btrfs_alloc_device(NULL, &devid, NULL);
2543 if (IS_ERR(device)) {
2544 ret = PTR_ERR(device);
2545 goto error;
2546 }
2547
2548 name = rcu_string_strdup(device_path, GFP_NOFS);
2549 if (!name) {
2550 kfree(device);
2551 ret = -ENOMEM;
2552 goto error;
2553 }
2554 rcu_assign_pointer(device->name, name);
2555
2556 q = bdev_get_queue(bdev);
2557 if (blk_queue_discard(q))
2558 device->can_discard = 1;
2559 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2560 device->writeable = 1;
2561 device->generation = 0;
2562 device->io_width = root->sectorsize;
2563 device->io_align = root->sectorsize;
2564 device->sector_size = root->sectorsize;
2565 device->total_bytes = btrfs_device_get_total_bytes(srcdev);
2566 device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
2567 device->bytes_used = btrfs_device_get_bytes_used(srcdev);
2568 ASSERT(list_empty(&srcdev->resized_list));
2569 device->commit_total_bytes = srcdev->commit_total_bytes;
2570 device->commit_bytes_used = device->bytes_used;
2571 device->dev_root = fs_info->dev_root;
2572 device->bdev = bdev;
2573 device->in_fs_metadata = 1;
2574 device->is_tgtdev_for_dev_replace = 1;
2575 device->mode = FMODE_EXCL;
2576 device->dev_stats_valid = 1;
2577 set_blocksize(device->bdev, 4096);
2578 device->fs_devices = fs_info->fs_devices;
2579 list_add(&device->dev_list, &fs_info->fs_devices->devices);
2580 fs_info->fs_devices->num_devices++;
2581 fs_info->fs_devices->open_devices++;
2582 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2583
2584 *device_out = device;
2585 return ret;
2586
2587error:
2588 blkdev_put(bdev, FMODE_EXCL);
2589 return ret;
2590}
2591
2592void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2593 struct btrfs_device *tgtdev)
2594{
2595 WARN_ON(fs_info->fs_devices->rw_devices == 0);
2596 tgtdev->io_width = fs_info->dev_root->sectorsize;
2597 tgtdev->io_align = fs_info->dev_root->sectorsize;
2598 tgtdev->sector_size = fs_info->dev_root->sectorsize;
2599 tgtdev->dev_root = fs_info->dev_root;
2600 tgtdev->in_fs_metadata = 1;
2601}
2602
2603static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2604 struct btrfs_device *device)
2605{
2606 int ret;
2607 struct btrfs_path *path;
2608 struct btrfs_root *root;
2609 struct btrfs_dev_item *dev_item;
2610 struct extent_buffer *leaf;
2611 struct btrfs_key key;
2612
2613 root = device->dev_root->fs_info->chunk_root;
2614
2615 path = btrfs_alloc_path();
2616 if (!path)
2617 return -ENOMEM;
2618
2619 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2620 key.type = BTRFS_DEV_ITEM_KEY;
2621 key.offset = device->devid;
2622
2623 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2624 if (ret < 0)
2625 goto out;
2626
2627 if (ret > 0) {
2628 ret = -ENOENT;
2629 goto out;
2630 }
2631
2632 leaf = path->nodes[0];
2633 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2634
2635 btrfs_set_device_id(leaf, dev_item, device->devid);
2636 btrfs_set_device_type(leaf, dev_item, device->type);
2637 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2638 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2639 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2640 btrfs_set_device_total_bytes(leaf, dev_item,
2641 btrfs_device_get_disk_total_bytes(device));
2642 btrfs_set_device_bytes_used(leaf, dev_item,
2643 btrfs_device_get_bytes_used(device));
2644 btrfs_mark_buffer_dirty(leaf);
2645
2646out:
2647 btrfs_free_path(path);
2648 return ret;
2649}
2650
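/*
 * Growing a device has to keep three places consistent: the
 * in-memory btrfs_device sizes, the superblock total_bytes and the
 * dev item in the chunk tree (written via btrfs_update_device()).
 * As a worked example, growing from 100GiB to 150GiB yields
 * diff = 50GiB, which is added to total_bytes and total_rw_bytes,
 * and the device is queued on fs_devices->resized_devices so the
 * committed sizes can be brought up to date at transaction commit.
 */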
2651int btrfs_grow_device(struct btrfs_trans_handle *trans,
2652 struct btrfs_device *device, u64 new_size)
2653{
2654 struct btrfs_super_block *super_copy =
2655 device->dev_root->fs_info->super_copy;
2656 struct btrfs_fs_devices *fs_devices;
2657 u64 old_total;
2658 u64 diff;
2659
2660 if (!device->writeable)
2661 return -EACCES;
2662
2663 lock_chunks(device->dev_root);
2664 old_total = btrfs_super_total_bytes(super_copy);
2665 diff = new_size - device->total_bytes;
2666
2667 if (new_size <= device->total_bytes ||
2668 device->is_tgtdev_for_dev_replace) {
2669 unlock_chunks(device->dev_root);
2670 return -EINVAL;
2671 }
2672
2673 fs_devices = device->dev_root->fs_info->fs_devices;
2674
2675 btrfs_set_super_total_bytes(super_copy, old_total + diff);
2676 device->fs_devices->total_rw_bytes += diff;
2677
2678 btrfs_device_set_total_bytes(device, new_size);
2679 btrfs_device_set_disk_total_bytes(device, new_size);
2680 btrfs_clear_space_info_full(device->dev_root->fs_info);
2681 if (list_empty(&device->resized_list))
2682 list_add_tail(&device->resized_list,
2683 &fs_devices->resized_devices);
2684 unlock_chunks(device->dev_root);
2685
2686 return btrfs_update_device(trans, device);
2687}
2688
2689static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2690 struct btrfs_root *root, u64 chunk_objectid,
2691 u64 chunk_offset)
2692{
2693 int ret;
2694 struct btrfs_path *path;
2695 struct btrfs_key key;
2696
2697 root = root->fs_info->chunk_root;
2698 path = btrfs_alloc_path();
2699 if (!path)
2700 return -ENOMEM;
2701
2702 key.objectid = chunk_objectid;
2703 key.offset = chunk_offset;
2704 key.type = BTRFS_CHUNK_ITEM_KEY;
2705
2706 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2707 if (ret < 0)
2708 goto out;
2709 else if (ret > 0) { /* Logic error or corruption */
2710 btrfs_std_error(root->fs_info, -ENOENT,
2711 "Failed lookup while freeing chunk.");
2712 ret = -ENOENT;
2713 goto out;
2714 }
2715
2716 ret = btrfs_del_item(trans, root, path);
2717 if (ret < 0)
2718 btrfs_std_error(root->fs_info, ret,
2719 "Failed to delete chunk item.");
2720out:
2721 btrfs_free_path(path);
2722 return ret;
2723}
2724
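/*
 * The superblock's sys_chunk_array is a packed byte array of
 * (struct btrfs_disk_key, struct btrfs_chunk + stripes) pairs, so
 * removing an entry means computing its length from num_stripes and
 * memmove()-ing the tail down over it, roughly:
 *
 *	| key0 chunk0 | key1 chunk1 | key2 chunk2 |
 *	              |<--- len --->|
 *	| key0 chunk0 | key2 chunk2 |               array_size -= len
 */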
2725 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
2726 u64 chunk_offset)
2727{
2728 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2729 struct btrfs_disk_key *disk_key;
2730 struct btrfs_chunk *chunk;
2731 u8 *ptr;
2732 int ret = 0;
2733 u32 num_stripes;
2734 u32 array_size;
2735 u32 len = 0;
2736 u32 cur;
2737 struct btrfs_key key;
2738
2739 lock_chunks(root);
2740 array_size = btrfs_super_sys_array_size(super_copy);
2741
2742 ptr = super_copy->sys_chunk_array;
2743 cur = 0;
2744
2745 while (cur < array_size) {
2746 disk_key = (struct btrfs_disk_key *)ptr;
2747 btrfs_disk_key_to_cpu(&key, disk_key);
2748
2749 len = sizeof(*disk_key);
2750
2751 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2752 chunk = (struct btrfs_chunk *)(ptr + len);
2753 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2754 len += btrfs_chunk_item_size(num_stripes);
2755 } else {
2756 ret = -EIO;
2757 break;
2758 }
2759 if (key.objectid == chunk_objectid &&
2760 key.offset == chunk_offset) {
2761 memmove(ptr, ptr + len, array_size - (cur + len));
2762 array_size -= len;
2763 btrfs_set_super_sys_array_size(super_copy, array_size);
2764 } else {
2765 ptr += len;
2766 cur += len;
2767 }
2768 }
2769 unlock_chunks(root);
2770 return ret;
2771}
2772
2773int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
2774 struct btrfs_root *root, u64 chunk_offset)
2775{
2776 struct extent_map_tree *em_tree;
2777 struct extent_map *em;
2778 struct btrfs_root *extent_root = root->fs_info->extent_root;
2779 struct map_lookup *map;
2780 u64 dev_extent_len = 0;
2781 u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2782 int i, ret = 0;
2783
2784 /* Just in case */
2785 root = root->fs_info->chunk_root;
2786 em_tree = &root->fs_info->mapping_tree.map_tree;
2787
2788 read_lock(&em_tree->lock);
2789 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2790 read_unlock(&em_tree->lock);
2791
2792 if (!em || em->start > chunk_offset ||
2793 em->start + em->len < chunk_offset) {
2794 /*
2795 * This is a logic error, but we don't want to just rely on the
2796 * user having built with ASSERT enabled, so if ASSERT doesn't
2797 * do anything we still error out.
2798 */
2799 ASSERT(0);
2800 if (em)
2801 free_extent_map(em);
2802 return -EINVAL;
2803 }
2804 map = em->map_lookup;
2805 lock_chunks(root->fs_info->chunk_root);
2806 check_system_chunk(trans, extent_root, map->type);
2807 unlock_chunks(root->fs_info->chunk_root);
2808
2809 for (i = 0; i < map->num_stripes; i++) {
2810 struct btrfs_device *device = map->stripes[i].dev;
2811 ret = btrfs_free_dev_extent(trans, device,
2812 map->stripes[i].physical,
2813 &dev_extent_len);
2814 if (ret) {
2815 btrfs_abort_transaction(trans, root, ret);
2816 goto out;
2817 }
2818
2819 if (device->bytes_used > 0) {
2820 lock_chunks(root);
2821 btrfs_device_set_bytes_used(device,
2822 device->bytes_used - dev_extent_len);
2823 spin_lock(&root->fs_info->free_chunk_lock);
2824 root->fs_info->free_chunk_space += dev_extent_len;
2825 spin_unlock(&root->fs_info->free_chunk_lock);
2826 btrfs_clear_space_info_full(root->fs_info);
2827 unlock_chunks(root);
2828 }
2829
2830 if (map->stripes[i].dev) {
2831 ret = btrfs_update_device(trans, map->stripes[i].dev);
2832 if (ret) {
2833 btrfs_abort_transaction(trans, root, ret);
2834 goto out;
2835 }
2836 }
2837 }
2838 ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
2839 if (ret) {
2840 btrfs_abort_transaction(trans, root, ret);
2841 goto out;
2842 }
2843
2844 trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2845
2846 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2847 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2848 if (ret) {
2849 btrfs_abort_transaction(trans, root, ret);
2850 goto out;
2851 }
2852 }
2853
2854 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em);
2855 if (ret) {
2856 btrfs_abort_transaction(trans, extent_root, ret);
2857 goto out;
2858 }
2859
2860out:
2861 /* once for us */
2862 free_extent_map(em);
2863 return ret;
2864}
2865
2866static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
2867{
2868 struct btrfs_root *extent_root;
2869 struct btrfs_trans_handle *trans;
2870 int ret;
2871
2872 root = root->fs_info->chunk_root;
2873 extent_root = root->fs_info->extent_root;
2874
2875 /*
2876 * Prevent races with automatic removal of unused block groups.
2877 * After we relocate and before we remove the chunk with offset
2878 * chunk_offset, automatic removal of the block group can kick in,
2879 * resulting in a failure when calling btrfs_remove_chunk() below.
2880 *
2881 * Make sure to acquire this mutex before doing a tree search (dev
2882 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
2883 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
2884 * we release the path used to search the chunk/dev tree and before
2885 * the current task acquires this mutex and calls us.
2886 */
2887 ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));
2888
2889 ret = btrfs_can_relocate(extent_root, chunk_offset);
2890 if (ret)
2891 return -ENOSPC;
2892
2893 /* step one, relocate all the extents inside this chunk */
2894 btrfs_scrub_pause(root);
2895 ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2896 btrfs_scrub_continue(root);
2897 if (ret)
2898 return ret;
2899
2900 trans = btrfs_start_trans_remove_block_group(root->fs_info,
2901 chunk_offset);
2902 if (IS_ERR(trans)) {
2903 ret = PTR_ERR(trans);
2904 btrfs_std_error(root->fs_info, ret, NULL);
2905 return ret;
2906 }
2907
2908 /*
2909 * step two, delete the device extents and the
2910 * chunk tree entries
2911 */
2912 ret = btrfs_remove_chunk(trans, root, chunk_offset);
2913 btrfs_end_transaction(trans, root);
2914 return ret;
2915}
2916
2917static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2918{
2919 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2920 struct btrfs_path *path;
2921 struct extent_buffer *leaf;
2922 struct btrfs_chunk *chunk;
2923 struct btrfs_key key;
2924 struct btrfs_key found_key;
2925 u64 chunk_type;
2926 bool retried = false;
2927 int failed = 0;
2928 int ret;
2929
2930 path = btrfs_alloc_path();
2931 if (!path)
2932 return -ENOMEM;
2933
2934again:
2935 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2936 key.offset = (u64)-1;
2937 key.type = BTRFS_CHUNK_ITEM_KEY;
2938
2939 while (1) {
2940 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
2941 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2942 if (ret < 0) {
2943 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2944 goto error;
2945 }
2946 BUG_ON(ret == 0); /* Corruption */
2947
2948 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2949 key.type);
2950 if (ret)
2951 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2952 if (ret < 0)
2953 goto error;
2954 if (ret > 0)
2955 break;
2956
2957 leaf = path->nodes[0];
2958 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2959
2960 chunk = btrfs_item_ptr(leaf, path->slots[0],
2961 struct btrfs_chunk);
2962 chunk_type = btrfs_chunk_type(leaf, chunk);
2963 btrfs_release_path(path);
2964
2965 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2966 ret = btrfs_relocate_chunk(chunk_root,
2967 found_key.offset);
2968 if (ret == -ENOSPC)
2969 failed++;
2970 else
2971 BUG_ON(ret);
2972 }
2973 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2974
2975 if (found_key.offset == 0)
2976 break;
2977 key.offset = found_key.offset - 1;
2978 }
2979 ret = 0;
2980 if (failed && !retried) {
2981 failed = 0;
2982 retried = true;
2983 goto again;
2984 } else if (WARN_ON(failed && retried)) {
2985 ret = -ENOSPC;
2986 }
2987error:
2988 btrfs_free_path(path);
2989 return ret;
2990}
2991
2992static int insert_balance_item(struct btrfs_root *root,
2993 struct btrfs_balance_control *bctl)
2994{
2995 struct btrfs_trans_handle *trans;
2996 struct btrfs_balance_item *item;
2997 struct btrfs_disk_balance_args disk_bargs;
2998 struct btrfs_path *path;
2999 struct extent_buffer *leaf;
3000 struct btrfs_key key;
3001 int ret, err;
3002
3003 path = btrfs_alloc_path();
3004 if (!path)
3005 return -ENOMEM;
3006
3007 trans = btrfs_start_transaction(root, 0);
3008 if (IS_ERR(trans)) {
3009 btrfs_free_path(path);
3010 return PTR_ERR(trans);
3011 }
3012
3013 key.objectid = BTRFS_BALANCE_OBJECTID;
3014 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3015 key.offset = 0;
3016
3017 ret = btrfs_insert_empty_item(trans, root, path, &key,
3018 sizeof(*item));
3019 if (ret)
3020 goto out;
3021
3022 leaf = path->nodes[0];
3023 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3024
3025 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
3026
3027 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3028 btrfs_set_balance_data(leaf, item, &disk_bargs);
3029 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3030 btrfs_set_balance_meta(leaf, item, &disk_bargs);
3031 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3032 btrfs_set_balance_sys(leaf, item, &disk_bargs);
3033
3034 btrfs_set_balance_flags(leaf, item, bctl->flags);
3035
3036 btrfs_mark_buffer_dirty(leaf);
3037out:
3038 btrfs_free_path(path);
3039 err = btrfs_commit_transaction(trans, root);
3040 if (err && !ret)
3041 ret = err;
3042 return ret;
3043}
3044
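/*
 * The item written by insert_balance_item() above is what makes
 * balance restartable across crashes: it lives at
 * (BTRFS_BALANCE_OBJECTID, BTRFS_TEMPORARY_ITEM_KEY, 0) in the tree
 * root, is read back by btrfs_recover_balance() on mount, and is
 * deleted by del_balance_item() below when the balance finishes or
 * is canceled (via __cancel_balance()).
 */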
3045static int del_balance_item(struct btrfs_root *root)
3046{
3047 struct btrfs_trans_handle *trans;
3048 struct btrfs_path *path;
3049 struct btrfs_key key;
3050 int ret, err;
3051
3052 path = btrfs_alloc_path();
3053 if (!path)
3054 return -ENOMEM;
3055
3056 trans = btrfs_start_transaction(root, 0);
3057 if (IS_ERR(trans)) {
3058 btrfs_free_path(path);
3059 return PTR_ERR(trans);
3060 }
3061
3062 key.objectid = BTRFS_BALANCE_OBJECTID;
3063 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3064 key.offset = 0;
3065
3066 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3067 if (ret < 0)
3068 goto out;
3069 if (ret > 0) {
3070 ret = -ENOENT;
3071 goto out;
3072 }
3073
3074 ret = btrfs_del_item(trans, root, path);
3075out:
3076 btrfs_free_path(path);
3077 err = btrfs_commit_transaction(trans, root);
3078 if (err && !ret)
3079 ret = err;
3080 return ret;
3081}
3082
3083/*
3084 * This is a heuristic used to reduce the number of chunks balanced on
3085 * resume after balance was interrupted.
3086 */
3087static void update_balance_args(struct btrfs_balance_control *bctl)
3088{
3089 /*
3090 * Turn on soft mode for chunk types that were being converted.
3091 */
3092 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3093 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3094 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3095 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3096 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3097 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3098
3099 /*
3100 * Turn on the usage filter if it is not already in use. The idea is
3101 * that chunks that we have already balanced should be
3102 * reasonably full. Don't do it for chunks that are being
3103 * converted - that will keep us from relocating unconverted
3104 * (albeit full) chunks.
3105 */
3106 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3107 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3108 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3109 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3110 bctl->data.usage = 90;
3111 }
3112 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3113 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3114 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3115 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3116 bctl->sys.usage = 90;
3117 }
3118 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3119 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3120 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3121 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3122 bctl->meta.usage = 90;
3123 }
3124}
3125
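/*
 * Worked example of the resume heuristic above: if a balance that
 * was converting data chunks to RAID1 is interrupted, on resume the
 * data args gain BTRFS_BALANCE_ARGS_SOFT so already-converted chunks
 * are skipped, while meta and sys, having no convert or usage filter
 * of their own, gain usage=90 so the chunks earlier passes already
 * compacted are left alone.
 */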
3126/*
3127 * Should be called with both balance and volume mutexes held to
3128 * serialize other volume operations (add_dev/rm_dev/resize) with
3129 * restriper. Same goes for unset_balance_control.
3130 */
3131static void set_balance_control(struct btrfs_balance_control *bctl)
3132{
3133 struct btrfs_fs_info *fs_info = bctl->fs_info;
3134
3135 BUG_ON(fs_info->balance_ctl);
3136
3137 spin_lock(&fs_info->balance_lock);
3138 fs_info->balance_ctl = bctl;
3139 spin_unlock(&fs_info->balance_lock);
3140}
3141
3142static void unset_balance_control(struct btrfs_fs_info *fs_info)
3143{
3144 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3145
3146 BUG_ON(!fs_info->balance_ctl);
3147
3148 spin_lock(&fs_info->balance_lock);
3149 fs_info->balance_ctl = NULL;
3150 spin_unlock(&fs_info->balance_lock);
3151
3152 kfree(bctl);
3153}
3154
3155/*
3156 * Balance filters. Return 1 if chunk should be filtered out
3157 * (should not be balanced).
3158 */
3159static int chunk_profiles_filter(u64 chunk_type,
3160 struct btrfs_balance_args *bargs)
3161{
3162 chunk_type = chunk_to_extended(chunk_type) &
3163 BTRFS_EXTENDED_PROFILE_MASK;
3164
3165 if (bargs->profiles & chunk_type)
3166 return 0;
3167
3168 return 1;
3169}
3170
3171static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3172 struct btrfs_balance_args *bargs)
3173{
3174 struct btrfs_block_group_cache *cache;
3175 u64 chunk_used;
3176 u64 user_thresh_min;
3177 u64 user_thresh_max;
3178 int ret = 1;
3179
3180 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3181 chunk_used = btrfs_block_group_used(&cache->item);
3182
3183 if (bargs->usage_min == 0)
3184 user_thresh_min = 0;
3185 else
3186 user_thresh_min = div_factor_fine(cache->key.offset,
3187 bargs->usage_min);
3188
3189 if (bargs->usage_max == 0)
3190 user_thresh_max = 1;
3191 else if (bargs->usage_max > 100)
3192 user_thresh_max = cache->key.offset;
3193 else
3194 user_thresh_max = div_factor_fine(cache->key.offset,
3195 bargs->usage_max);
3196
3197 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3198 ret = 0;
3199
3200 btrfs_put_block_group(cache);
3201 return ret;
3202}
3203
3204static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3205 u64 chunk_offset, struct btrfs_balance_args *bargs)
3206{
3207 struct btrfs_block_group_cache *cache;
3208 u64 chunk_used, user_thresh;
3209 int ret = 1;
3210
3211 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3212 chunk_used = btrfs_block_group_used(&cache->item);
3213
3214 if (bargs->usage_min == 0)
3215 user_thresh = 1;
3216 else if (bargs->usage > 100)
3217 user_thresh = cache->key.offset;
3218 else
3219 user_thresh = div_factor_fine(cache->key.offset,
3220 bargs->usage);
3221
3222 if (chunk_used < user_thresh)
3223 ret = 0;
3224
3225 btrfs_put_block_group(cache);
3226 return ret;
3227}
3228
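/*
 * The thresholds in both usage filters above are percentages of the
 * block group size: div_factor_fine(size, n) computes size * n / 100.
 * For example, a 1GiB data chunk with -dusage=30 gets a user_thresh
 * of roughly 307MiB and is balanced only while its used bytes are
 * below that.
 */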
3229static int chunk_devid_filter(struct extent_buffer *leaf,
3230 struct btrfs_chunk *chunk,
3231 struct btrfs_balance_args *bargs)
3232{
3233 struct btrfs_stripe *stripe;
3234 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3235 int i;
3236
3237 for (i = 0; i < num_stripes; i++) {
3238 stripe = btrfs_stripe_nr(chunk, i);
3239 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3240 return 0;
3241 }
3242
3243 return 1;
3244}
3245
3246/* [pstart, pend) */
3247static int chunk_drange_filter(struct extent_buffer *leaf,
3248 struct btrfs_chunk *chunk,
3249 u64 chunk_offset,
3250 struct btrfs_balance_args *bargs)
3251{
3252 struct btrfs_stripe *stripe;
3253 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3254 u64 stripe_offset;
3255 u64 stripe_length;
3256 int factor;
3257 int i;
3258
3259 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3260 return 0;
3261
3262 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
3263 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
3264 factor = num_stripes / 2;
3265 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
3266 factor = num_stripes - 1;
3267 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
3268 factor = num_stripes - 2;
3269 } else {
3270 factor = num_stripes;
3271 }
3272
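	/*
	 * factor converts the chunk's logical length into the length
	 * of one on-device stripe: e.g. a RAID10 chunk with 4 stripes
	 * stores length/2 bytes per device, a RAID5 chunk with 4
	 * stripes stores length/3.
	 */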
3273 for (i = 0; i < num_stripes; i++) {
3274 stripe = btrfs_stripe_nr(chunk, i);
3275 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3276 continue;
3277
3278 stripe_offset = btrfs_stripe_offset(leaf, stripe);
3279 stripe_length = btrfs_chunk_length(leaf, chunk);
3280 stripe_length = div_u64(stripe_length, factor);
3281
3282 if (stripe_offset < bargs->pend &&
3283 stripe_offset + stripe_length > bargs->pstart)
3284 return 0;
3285 }
3286
3287 return 1;
3288}
3289
3290/* [vstart, vend) */
3291static int chunk_vrange_filter(struct extent_buffer *leaf,
3292 struct btrfs_chunk *chunk,
3293 u64 chunk_offset,
3294 struct btrfs_balance_args *bargs)
3295{
3296 if (chunk_offset < bargs->vend &&
3297 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3298 /* at least part of the chunk is inside this vrange */
3299 return 0;
3300
3301 return 1;
3302}
3303
3304static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3305 struct btrfs_chunk *chunk,
3306 struct btrfs_balance_args *bargs)
3307{
3308 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3309
3310 if (bargs->stripes_min <= num_stripes
3311 && num_stripes <= bargs->stripes_max)
3312 return 0;
3313
3314 return 1;
3315}
3316
3317static int chunk_soft_convert_filter(u64 chunk_type,
3318 struct btrfs_balance_args *bargs)
3319{
3320 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3321 return 0;
3322
3323 chunk_type = chunk_to_extended(chunk_type) &
3324 BTRFS_EXTENDED_PROFILE_MASK;
3325
3326 if (bargs->target == chunk_type)
3327 return 1;
3328
3329 return 0;
3330}
3331
3332static int should_balance_chunk(struct btrfs_root *root,
3333 struct extent_buffer *leaf,
3334 struct btrfs_chunk *chunk, u64 chunk_offset)
3335{
3336 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
3337 struct btrfs_balance_args *bargs = NULL;
3338 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3339
3340 /* type filter */
3341 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3342 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3343 return 0;
3344 }
3345
3346 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3347 bargs = &bctl->data;
3348 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3349 bargs = &bctl->sys;
3350 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3351 bargs = &bctl->meta;
3352
3353 /* profiles filter */
3354 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3355 chunk_profiles_filter(chunk_type, bargs)) {
3356 return 0;
3357 }
3358
3359 /* usage filter */
3360 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3361 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
3362 return 0;
3363 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3364 chunk_usage_range_filter(bctl->fs_info, chunk_offset, bargs)) {
3365 return 0;
3366 }
3367
3368 /* devid filter */
3369 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3370 chunk_devid_filter(leaf, chunk, bargs)) {
3371 return 0;
3372 }
3373
3374 /* drange filter, makes sense only with devid filter */
3375 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3376 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
3377 return 0;
3378 }
3379
3380 /* vrange filter */
3381 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3382 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3383 return 0;
3384 }
3385
3386 /* stripes filter */
3387 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3388 chunk_stripes_range_filter(leaf, chunk, bargs)) {
3389 return 0;
3390 }
3391
3392 /* soft profile changing mode */
3393 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3394 chunk_soft_convert_filter(chunk_type, bargs)) {
3395 return 0;
3396 }
3397
3398 /*
3399 * limited by count, must be the last filter
3400 */
3401 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3402 if (bargs->limit == 0)
3403 return 0;
3404 else
3405 bargs->limit--;
3406 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3407 /*
3408 * Same logic as the 'limit' filter; the minimum cannot be
3409 * determined here because we do not have the global information
3410 * about the count of all chunks that satisfy the filters.
3411 */
3412 if (bargs->limit_max == 0)
3413 return 0;
3414 else
3415 bargs->limit_max--;
3416 }
3417
3418 return 1;
3419}
3420
3421static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3422{
3423 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3424 struct btrfs_root *chunk_root = fs_info->chunk_root;
3425 struct btrfs_root *dev_root = fs_info->dev_root;
3426 struct list_head *devices;
3427 struct btrfs_device *device;
3428 u64 old_size;
3429 u64 size_to_free;
3430 u64 chunk_type;
3431 struct btrfs_chunk *chunk;
3432 struct btrfs_path *path;
3433 struct btrfs_key key;
3434 struct btrfs_key found_key;
3435 struct btrfs_trans_handle *trans;
3436 struct extent_buffer *leaf;
3437 int slot;
3438 int ret;
3439 int enospc_errors = 0;
3440 bool counting = true;
3441 /* The single value limit and min/max limits share the same bytes in the args union */
3442 u64 limit_data = bctl->data.limit;
3443 u64 limit_meta = bctl->meta.limit;
3444 u64 limit_sys = bctl->sys.limit;
3445 u32 count_data = 0;
3446 u32 count_meta = 0;
3447 u32 count_sys = 0;
3448 int chunk_reserved = 0;
3449
3450 /* step one, make some room on all the devices */
3451 devices = &fs_info->fs_devices->devices;
3452 list_for_each_entry(device, devices, dev_list) {
3453 old_size = btrfs_device_get_total_bytes(device);
3454 size_to_free = div_factor(old_size, 1);
3455 size_to_free = min_t(u64, size_to_free, SZ_1M);
3456 if (!device->writeable ||
3457 btrfs_device_get_total_bytes(device) -
3458 btrfs_device_get_bytes_used(device) > size_to_free ||
3459 device->is_tgtdev_for_dev_replace)
3460 continue;
3461
3462 ret = btrfs_shrink_device(device, old_size - size_to_free);
3463 if (ret == -ENOSPC)
3464 break;
3465 BUG_ON(ret);
3466
3467 trans = btrfs_start_transaction(dev_root, 0);
3468 BUG_ON(IS_ERR(trans));
3469
3470 ret = btrfs_grow_device(trans, device, old_size);
3471 BUG_ON(ret);
3472
3473 btrfs_end_transaction(trans, dev_root);
3474 }
3475
3476 /* step two, relocate all the chunks */
3477 path = btrfs_alloc_path();
3478 if (!path) {
3479 ret = -ENOMEM;
3480 goto error;
3481 }
3482
3483 /* zero out stat counters */
3484 spin_lock(&fs_info->balance_lock);
3485 memset(&bctl->stat, 0, sizeof(bctl->stat));
3486 spin_unlock(&fs_info->balance_lock);
3487again:
3488 if (!counting) {
3489 /*
3490 * The single value limit and min/max limits share the same bytes
3491 * in the args union, so restore the saved values for the real pass
3492 */
3493 bctl->data.limit = limit_data;
3494 bctl->meta.limit = limit_meta;
3495 bctl->sys.limit = limit_sys;
3496 }
3497 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3498 key.offset = (u64)-1;
3499 key.type = BTRFS_CHUNK_ITEM_KEY;
3500
3501 while (1) {
3502 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3503 atomic_read(&fs_info->balance_cancel_req)) {
3504 ret = -ECANCELED;
3505 goto error;
3506 }
3507
3508 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3509 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3510 if (ret < 0) {
3511 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3512 goto error;
3513 }
3514
3515 /*
3516 * this shouldn't happen, it means the last relocate
3517 * failed
3518 */
3519 if (ret == 0)
3520 BUG(); /* FIXME break ? */
3521
3522 ret = btrfs_previous_item(chunk_root, path, 0,
3523 BTRFS_CHUNK_ITEM_KEY);
3524 if (ret) {
3525 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3526 ret = 0;
3527 break;
3528 }
3529
3530 leaf = path->nodes[0];
3531 slot = path->slots[0];
3532 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3533
3534 if (found_key.objectid != key.objectid) {
3535 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3536 break;
3537 }
3538
3539 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3540 chunk_type = btrfs_chunk_type(leaf, chunk);
3541
3542 if (!counting) {
3543 spin_lock(&fs_info->balance_lock);
3544 bctl->stat.considered++;
3545 spin_unlock(&fs_info->balance_lock);
3546 }
3547
3548 ret = should_balance_chunk(chunk_root, leaf, chunk,
3549 found_key.offset);
3550
3551 btrfs_release_path(path);
3552 if (!ret) {
3553 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3554 goto loop;
3555 }
3556
3557 if (counting) {
3558 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3559 spin_lock(&fs_info->balance_lock);
3560 bctl->stat.expected++;
3561 spin_unlock(&fs_info->balance_lock);
3562
3563 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3564 count_data++;
3565 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3566 count_sys++;
3567 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3568 count_meta++;
3569
3570 goto loop;
3571 }
3572
3573 /*
3574 * Apply limit_min filter, no need to check if the LIMITS
3575 * filter is used, limit_min is 0 by default
3576 */
3577 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3578 count_data < bctl->data.limit_min)
3579 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3580 count_meta < bctl->meta.limit_min)
3581 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3582 count_sys < bctl->sys.limit_min)) {
3583 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3584 goto loop;
3585 }
3586
3587 if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) && !chunk_reserved) {
3588 trans = btrfs_start_transaction(chunk_root, 0);
3589 if (IS_ERR(trans)) {
3590 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3591 ret = PTR_ERR(trans);
3592 goto error;
3593 }
3594
3595 ret = btrfs_force_chunk_alloc(trans, chunk_root,
3596 BTRFS_BLOCK_GROUP_DATA);
3597 btrfs_end_transaction(trans, chunk_root);
3598 if (ret < 0) {
3599 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3600 goto error;
3601 }
3602 chunk_reserved = 1;
3603 }
3604
3605 ret = btrfs_relocate_chunk(chunk_root,
3606 found_key.offset);
3607 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3608 if (ret && ret != -ENOSPC)
3609 goto error;
3610 if (ret == -ENOSPC) {
3611 enospc_errors++;
3612 } else {
3613 spin_lock(&fs_info->balance_lock);
3614 bctl->stat.completed++;
3615 spin_unlock(&fs_info->balance_lock);
3616 }
3617loop:
3618 if (found_key.offset == 0)
3619 break;
3620 key.offset = found_key.offset - 1;
3621 }
3622
3623 if (counting) {
3624 btrfs_release_path(path);
3625 counting = false;
3626 goto again;
3627 }
3628error:
3629 btrfs_free_path(path);
3630 if (enospc_errors) {
3631 btrfs_info(fs_info, "%d enospc errors during balance",
3632 enospc_errors);
3633 if (!ret)
3634 ret = -ENOSPC;
3635 }
3636
3637 return ret;
3638}
3639
3640/**
3641 * alloc_profile_is_valid - see if a given profile is valid and reduced
3642 * @flags: profile to validate
3643 * @extended: if true @flags is treated as an extended profile
3644 */
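/*
 * "Reduced" means at most one profile bit is set; the final check is
 * the usual power-of-two test, so e.g. RAID1 alone passes
 * (flags & (flags - 1)) == 0 while RAID1|RAID5 does not.
 */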
3645static int alloc_profile_is_valid(u64 flags, int extended)
3646{
3647 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3648 BTRFS_BLOCK_GROUP_PROFILE_MASK);
3649
3650 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3651
3652 /* 1) check that all other bits are zeroed */
3653 if (flags & ~mask)
3654 return 0;
3655
3656 /* 2) see if profile is reduced */
3657 if (flags == 0)
3658 return !extended; /* "0" is valid for usual profiles */
3659
3660 /* true if exactly one bit set */
3661 return (flags & (flags - 1)) == 0;
3662}
3663
3664static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3665{
3666 /* cancel requested || normal exit path */
3667 return atomic_read(&fs_info->balance_cancel_req) ||
3668 (atomic_read(&fs_info->balance_pause_req) == 0 &&
3669 atomic_read(&fs_info->balance_cancel_req) == 0);
3670}
3671
3672static void __cancel_balance(struct btrfs_fs_info *fs_info)
3673{
3674 int ret;
3675
3676 unset_balance_control(fs_info);
3677 ret = del_balance_item(fs_info->tree_root);
3678 if (ret)
3679 btrfs_std_error(fs_info, ret, NULL);
3680
3681 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3682}
3683
3684/* Non-zero return value signifies invalidity */
3685static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
3686 u64 allowed)
3687{
3688 return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3689 (!alloc_profile_is_valid(bctl_arg->target, 1) ||
3690 (bctl_arg->target & ~allowed)));
3691}
3692
3693/*
3694 * Should be called with both balance and volume mutexes held
3695 */
3696int btrfs_balance(struct btrfs_balance_control *bctl,
3697 struct btrfs_ioctl_balance_args *bargs)
3698{
3699 struct btrfs_fs_info *fs_info = bctl->fs_info;
3700 u64 allowed;
3701 int mixed = 0;
3702 int ret;
3703 u64 num_devices;
3704 unsigned seq;
3705
3706 if (btrfs_fs_closing(fs_info) ||
3707 atomic_read(&fs_info->balance_pause_req) ||
3708 atomic_read(&fs_info->balance_cancel_req)) {
3709 ret = -EINVAL;
3710 goto out;
3711 }
3712
3713 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3714 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3715 mixed = 1;
3716
3717 /*
3718 * In case of mixed groups both data and meta should be picked,
3719 * and identical options should be given for both of them.
3720 */
3721 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3722 if (mixed && (bctl->flags & allowed)) {
3723 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3724 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3725 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3726 btrfs_err(fs_info, "with mixed groups data and "
3727 "metadata balance options must be the same");
3728 ret = -EINVAL;
3729 goto out;
3730 }
3731 }
3732
3733 num_devices = fs_info->fs_devices->num_devices;
3734 btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
3735 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3736 BUG_ON(num_devices < 1);
3737 num_devices--;
3738 }
3739 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
3740 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3741 if (num_devices == 1)
3742 allowed |= BTRFS_BLOCK_GROUP_DUP;
3743 else if (num_devices > 1)
3744 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3745 if (num_devices > 2)
3746 allowed |= BTRFS_BLOCK_GROUP_RAID5;
3747 if (num_devices > 3)
3748 allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3749 BTRFS_BLOCK_GROUP_RAID6);
3750 if (validate_convert_profile(&bctl->data, allowed)) {
3751 btrfs_err(fs_info, "unable to start balance with target "
3752 "data profile %llu",
3753 bctl->data.target);
3754 ret = -EINVAL;
3755 goto out;
3756 }
3757 if (validate_convert_profile(&bctl->meta, allowed)) {
3758 btrfs_err(fs_info,
3759 "unable to start balance with target metadata profile %llu",
3760 bctl->meta.target);
3761 ret = -EINVAL;
3762 goto out;
3763 }
3764 if (validate_convert_profile(&bctl->sys, allowed)) {
3765 btrfs_err(fs_info,
3766 "unable to start balance with target system profile %llu",
3767 bctl->sys.target);
3768 ret = -EINVAL;
3769 goto out;
3770 }
3771
3772 /* allow to reduce meta or sys integrity only if force set */
3773 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3774 BTRFS_BLOCK_GROUP_RAID10 |
3775 BTRFS_BLOCK_GROUP_RAID5 |
3776 BTRFS_BLOCK_GROUP_RAID6;
3777 do {
3778 seq = read_seqbegin(&fs_info->profiles_lock);
3779
3780 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3781 (fs_info->avail_system_alloc_bits & allowed) &&
3782 !(bctl->sys.target & allowed)) ||
3783 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3784 (fs_info->avail_metadata_alloc_bits & allowed) &&
3785 !(bctl->meta.target & allowed))) {
3786 if (bctl->flags & BTRFS_BALANCE_FORCE) {
3787 btrfs_info(fs_info, "force reducing metadata integrity");
3788 } else {
3789 btrfs_err(fs_info, "balance will reduce metadata "
3790 "integrity, use force if you want this");
3791 ret = -EINVAL;
3792 goto out;
3793 }
3794 }
3795 } while (read_seqretry(&fs_info->profiles_lock, seq));
3796
3797 if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) <
3798 btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) {
3799 btrfs_warn(fs_info,
3800 "metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
3801 bctl->meta.target, bctl->data.target);
3802 }
3803
3804 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3805 fs_info->num_tolerated_disk_barrier_failures = min(
3806 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info),
3807 btrfs_get_num_tolerated_disk_barrier_failures(
3808 bctl->sys.target));
3809 }
3810
3811 ret = insert_balance_item(fs_info->tree_root, bctl);
3812 if (ret && ret != -EEXIST)
3813 goto out;
3814
3815 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3816 BUG_ON(ret == -EEXIST);
3817 set_balance_control(bctl);
3818 } else {
3819 BUG_ON(ret != -EEXIST);
3820 spin_lock(&fs_info->balance_lock);
3821 update_balance_args(bctl);
3822 spin_unlock(&fs_info->balance_lock);
3823 }
3824
3825 atomic_inc(&fs_info->balance_running);
3826 mutex_unlock(&fs_info->balance_mutex);
3827
3828 ret = __btrfs_balance(fs_info);
3829
3830 mutex_lock(&fs_info->balance_mutex);
3831 atomic_dec(&fs_info->balance_running);
3832
3833 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3834 fs_info->num_tolerated_disk_barrier_failures =
3835 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3836 }
3837
3838 if (bargs) {
3839 memset(bargs, 0, sizeof(*bargs));
3840 update_ioctl_balance_args(fs_info, 0, bargs);
3841 }
3842
3843 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3844 balance_need_close(fs_info)) {
3845 __cancel_balance(fs_info);
3846 }
3847
3848 wake_up(&fs_info->balance_wait_q);
3849
3850 return ret;
3851out:
3852 if (bctl->flags & BTRFS_BALANCE_RESUME)
3853 __cancel_balance(fs_info);
3854 else {
3855 kfree(bctl);
3856 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3857 }
3858 return ret;
3859}
3860
3861static int balance_kthread(void *data)
3862{
3863 struct btrfs_fs_info *fs_info = data;
3864 int ret = 0;
3865
3866 mutex_lock(&fs_info->volume_mutex);
3867 mutex_lock(&fs_info->balance_mutex);
3868
3869 if (fs_info->balance_ctl) {
3870 btrfs_info(fs_info, "continuing balance");
3871 ret = btrfs_balance(fs_info->balance_ctl, NULL);
3872 }
3873
3874 mutex_unlock(&fs_info->balance_mutex);
3875 mutex_unlock(&fs_info->volume_mutex);
3876
3877 return ret;
3878}
3879
3880int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3881{
3882 struct task_struct *tsk;
3883
3884 spin_lock(&fs_info->balance_lock);
3885 if (!fs_info->balance_ctl) {
3886 spin_unlock(&fs_info->balance_lock);
3887 return 0;
3888 }
3889 spin_unlock(&fs_info->balance_lock);
3890
3891 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3892 btrfs_info(fs_info, "force skipping balance");
3893 return 0;
3894 }
3895
3896 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3897 return PTR_ERR_OR_ZERO(tsk);
3898}
3899
3900int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3901{
3902 struct btrfs_balance_control *bctl;
3903 struct btrfs_balance_item *item;
3904 struct btrfs_disk_balance_args disk_bargs;
3905 struct btrfs_path *path;
3906 struct extent_buffer *leaf;
3907 struct btrfs_key key;
3908 int ret;
3909
3910 path = btrfs_alloc_path();
3911 if (!path)
3912 return -ENOMEM;
3913
3914 key.objectid = BTRFS_BALANCE_OBJECTID;
3915 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3916 key.offset = 0;
3917
3918 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3919 if (ret < 0)
3920 goto out;
3921 if (ret > 0) { /* ret = -ENOENT; */
3922 ret = 0;
3923 goto out;
3924 }
3925
3926 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3927 if (!bctl) {
3928 ret = -ENOMEM;
3929 goto out;
3930 }
3931
3932 leaf = path->nodes[0];
3933 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3934
3935 bctl->fs_info = fs_info;
3936 bctl->flags = btrfs_balance_flags(leaf, item);
3937 bctl->flags |= BTRFS_BALANCE_RESUME;
3938
3939 btrfs_balance_data(leaf, item, &disk_bargs);
3940 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3941 btrfs_balance_meta(leaf, item, &disk_bargs);
3942 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3943 btrfs_balance_sys(leaf, item, &disk_bargs);
3944 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3945
3946 WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3947
3948 mutex_lock(&fs_info->volume_mutex);
3949 mutex_lock(&fs_info->balance_mutex);
3950
3951 set_balance_control(bctl);
3952
3953 mutex_unlock(&fs_info->balance_mutex);
3954 mutex_unlock(&fs_info->volume_mutex);
3955out:
3956 btrfs_free_path(path);
3957 return ret;
3958}
3959
3960int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3961{
3962 int ret = 0;
3963
3964 mutex_lock(&fs_info->balance_mutex);
3965 if (!fs_info->balance_ctl) {
3966 mutex_unlock(&fs_info->balance_mutex);
3967 return -ENOTCONN;
3968 }
3969
3970 if (atomic_read(&fs_info->balance_running)) {
3971 atomic_inc(&fs_info->balance_pause_req);
3972 mutex_unlock(&fs_info->balance_mutex);
3973
3974 wait_event(fs_info->balance_wait_q,
3975 atomic_read(&fs_info->balance_running) == 0);
3976
3977 mutex_lock(&fs_info->balance_mutex);
3978 /* we are good with balance_ctl ripped off from under us */
3979 BUG_ON(atomic_read(&fs_info->balance_running));
3980 atomic_dec(&fs_info->balance_pause_req);
3981 } else {
3982 ret = -ENOTCONN;
3983 }
3984
3985 mutex_unlock(&fs_info->balance_mutex);
3986 return ret;
3987}
3988
3989int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3990{
3991 if (fs_info->sb->s_flags & MS_RDONLY)
3992 return -EROFS;
3993
3994 mutex_lock(&fs_info->balance_mutex);
3995 if (!fs_info->balance_ctl) {
3996 mutex_unlock(&fs_info->balance_mutex);
3997 return -ENOTCONN;
3998 }
3999
4000 atomic_inc(&fs_info->balance_cancel_req);
4001 /*
4002	 * if a balance is running, just wait and return; the balance item
4003	 * is deleted in btrfs_balance() in that case
4004 */
4005 if (atomic_read(&fs_info->balance_running)) {
4006 mutex_unlock(&fs_info->balance_mutex);
4007 wait_event(fs_info->balance_wait_q,
4008 atomic_read(&fs_info->balance_running) == 0);
4009 mutex_lock(&fs_info->balance_mutex);
4010 } else {
4011 /* __cancel_balance needs volume_mutex */
4012 mutex_unlock(&fs_info->balance_mutex);
4013 mutex_lock(&fs_info->volume_mutex);
4014 mutex_lock(&fs_info->balance_mutex);
4015
4016 if (fs_info->balance_ctl)
4017 __cancel_balance(fs_info);
4018
4019 mutex_unlock(&fs_info->volume_mutex);
4020 }
4021
4022 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
4023 atomic_dec(&fs_info->balance_cancel_req);
4024 mutex_unlock(&fs_info->balance_mutex);
4025 return 0;
4026}
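
/*
 * Illustrative user-space sketch, not part of this file: pause/cancel
 * requests arrive via the BTRFS_IOC_BALANCE_CTL ioctl and end up in
 * btrfs_pause_balance()/btrfs_cancel_balance() above. Guarded out since
 * it is user-space code; the mount point path is an assumption.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

static int pause_balance(const char *mnt)
{
	/* any O_RDONLY fd on the mounted filesystem will do */
	int fd = open(mnt, O_RDONLY);
	int ret;

	if (fd < 0)
		return -1;
	/* use BTRFS_BALANCE_CTL_CANCEL to cancel instead of pausing */
	ret = ioctl(fd, BTRFS_IOC_BALANCE_CTL, BTRFS_BALANCE_CTL_PAUSE);
	close(fd);
	return ret;
}
#endif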
4027
4028static int btrfs_uuid_scan_kthread(void *data)
4029{
4030 struct btrfs_fs_info *fs_info = data;
4031 struct btrfs_root *root = fs_info->tree_root;
4032 struct btrfs_key key;
4033 struct btrfs_key max_key;
4034 struct btrfs_path *path = NULL;
4035 int ret = 0;
4036 struct extent_buffer *eb;
4037 int slot;
4038 struct btrfs_root_item root_item;
4039 u32 item_size;
4040 struct btrfs_trans_handle *trans = NULL;
4041
4042 path = btrfs_alloc_path();
4043 if (!path) {
4044 ret = -ENOMEM;
4045 goto out;
4046 }
4047
4048 key.objectid = 0;
4049 key.type = BTRFS_ROOT_ITEM_KEY;
4050 key.offset = 0;
4051
4052 max_key.objectid = (u64)-1;
4053 max_key.type = BTRFS_ROOT_ITEM_KEY;
4054 max_key.offset = (u64)-1;
4055
4056 while (1) {
4057 ret = btrfs_search_forward(root, &key, path, 0);
4058 if (ret) {
4059 if (ret > 0)
4060 ret = 0;
4061 break;
4062 }
4063
4064 if (key.type != BTRFS_ROOT_ITEM_KEY ||
4065 (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4066 key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4067 key.objectid > BTRFS_LAST_FREE_OBJECTID)
4068 goto skip;
4069
4070 eb = path->nodes[0];
4071 slot = path->slots[0];
4072 item_size = btrfs_item_size_nr(eb, slot);
4073 if (item_size < sizeof(root_item))
4074 goto skip;
4075
4076 read_extent_buffer(eb, &root_item,
4077 btrfs_item_ptr_offset(eb, slot),
4078 (int)sizeof(root_item));
4079 if (btrfs_root_refs(&root_item) == 0)
4080 goto skip;
4081
4082 if (!btrfs_is_empty_uuid(root_item.uuid) ||
4083 !btrfs_is_empty_uuid(root_item.received_uuid)) {
4084 if (trans)
4085 goto update_tree;
4086
4087 btrfs_release_path(path);
4088 /*
4089 * 1 - subvol uuid item
4090 * 1 - received_subvol uuid item
4091 */
4092 trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4093 if (IS_ERR(trans)) {
4094 ret = PTR_ERR(trans);
4095 break;
4096 }
4097 continue;
4098 } else {
4099 goto skip;
4100 }
4101update_tree:
4102 if (!btrfs_is_empty_uuid(root_item.uuid)) {
4103 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
4104 root_item.uuid,
4105 BTRFS_UUID_KEY_SUBVOL,
4106 key.objectid);
4107 if (ret < 0) {
4108 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4109 ret);
4110 break;
4111 }
4112 }
4113
4114 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4115 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
4116 root_item.received_uuid,
4117 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4118 key.objectid);
4119 if (ret < 0) {
4120 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4121 ret);
4122 break;
4123 }
4124 }
4125
4126skip:
4127 if (trans) {
4128 ret = btrfs_end_transaction(trans, fs_info->uuid_root);
4129 trans = NULL;
4130 if (ret)
4131 break;
4132 }
4133
4134 btrfs_release_path(path);
4135 if (key.offset < (u64)-1) {
4136 key.offset++;
4137 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4138 key.offset = 0;
4139 key.type = BTRFS_ROOT_ITEM_KEY;
4140 } else if (key.objectid < (u64)-1) {
4141 key.offset = 0;
4142 key.type = BTRFS_ROOT_ITEM_KEY;
4143 key.objectid++;
4144 } else {
4145 break;
4146 }
4147 cond_resched();
4148 }
4149
4150out:
4151 btrfs_free_path(path);
4152 if (trans && !IS_ERR(trans))
4153 btrfs_end_transaction(trans, fs_info->uuid_root);
4154 if (ret)
4155 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4156 else
4157 fs_info->update_uuid_tree_gen = 1;
4158 up(&fs_info->uuid_tree_rescan_sem);
4159 return 0;
4160}
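
/*
 * Illustrative stand-alone sketch, not part of this file: the key
 * advance at the bottom of the scan loop above, i.e. step an
 * (objectid, type, offset) triple to the next possible ROOT_ITEM
 * position without overflowing any component. Names are made up.
 */
#if 0
#define MODEL_ROOT_ITEM_KEY	132	/* value of BTRFS_ROOT_ITEM_KEY */

struct model_key {
	unsigned long long objectid;
	unsigned char type;
	unsigned long long offset;
};

/* returns 0 once the whole key space has been walked */
static int next_root_item_key(struct model_key *key)
{
	if (key->offset < (unsigned long long)-1) {
		key->offset++;
	} else if (key->type < MODEL_ROOT_ITEM_KEY) {
		key->offset = 0;
		key->type = MODEL_ROOT_ITEM_KEY;
	} else if (key->objectid < (unsigned long long)-1) {
		key->offset = 0;
		key->type = MODEL_ROOT_ITEM_KEY;
		key->objectid++;
	} else {
		return 0;
	}
	return 1;
}
#endif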
4161
4162/*
4163 * Callback for btrfs_uuid_tree_iterate().
4164 * returns:
4165 * 0 check succeeded, the entry is not outdated.
4166 * < 0 if an error occurred.
4167 * > 0 if the check failed, which means the caller shall remove the entry.
4168 */
4169static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
4170 u8 *uuid, u8 type, u64 subid)
4171{
4172 struct btrfs_key key;
4173 int ret = 0;
4174 struct btrfs_root *subvol_root;
4175
4176 if (type != BTRFS_UUID_KEY_SUBVOL &&
4177 type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
4178 goto out;
4179
4180 key.objectid = subid;
4181 key.type = BTRFS_ROOT_ITEM_KEY;
4182 key.offset = (u64)-1;
4183 subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
4184 if (IS_ERR(subvol_root)) {
4185 ret = PTR_ERR(subvol_root);
4186 if (ret == -ENOENT)
4187 ret = 1;
4188 goto out;
4189 }
4190
4191 switch (type) {
4192 case BTRFS_UUID_KEY_SUBVOL:
4193 if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
4194 ret = 1;
4195 break;
4196 case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
4197 if (memcmp(uuid, subvol_root->root_item.received_uuid,
4198 BTRFS_UUID_SIZE))
4199 ret = 1;
4200 break;
4201 }
4202
4203out:
4204 return ret;
4205}
4206
4207static int btrfs_uuid_rescan_kthread(void *data)
4208{
4209 struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
4210 int ret;
4211
4212 /*
4213 * 1st step is to iterate through the existing UUID tree and
4214 * to delete all entries that contain outdated data.
4215 * 2nd step is to add all missing entries to the UUID tree.
4216 */
4217 ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
4218 if (ret < 0) {
4219 btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
4220 up(&fs_info->uuid_tree_rescan_sem);
4221 return ret;
4222 }
4223 return btrfs_uuid_scan_kthread(data);
4224}
4225
4226int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4227{
4228 struct btrfs_trans_handle *trans;
4229 struct btrfs_root *tree_root = fs_info->tree_root;
4230 struct btrfs_root *uuid_root;
4231 struct task_struct *task;
4232 int ret;
4233
4234 /*
4235 * 1 - root node
4236 * 1 - root item
4237 */
4238 trans = btrfs_start_transaction(tree_root, 2);
4239 if (IS_ERR(trans))
4240 return PTR_ERR(trans);
4241
4242 uuid_root = btrfs_create_tree(trans, fs_info,
4243 BTRFS_UUID_TREE_OBJECTID);
4244 if (IS_ERR(uuid_root)) {
4245 ret = PTR_ERR(uuid_root);
4246 btrfs_abort_transaction(trans, tree_root, ret);
4247 return ret;
4248 }
4249
4250 fs_info->uuid_root = uuid_root;
4251
4252 ret = btrfs_commit_transaction(trans, tree_root);
4253 if (ret)
4254 return ret;
4255
4256 down(&fs_info->uuid_tree_rescan_sem);
4257 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4258 if (IS_ERR(task)) {
4259		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4260 btrfs_warn(fs_info, "failed to start uuid_scan task");
4261 up(&fs_info->uuid_tree_rescan_sem);
4262 return PTR_ERR(task);
4263 }
4264
4265 return 0;
4266}
4267
4268int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
4269{
4270 struct task_struct *task;
4271
4272 down(&fs_info->uuid_tree_rescan_sem);
4273 task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
4274 if (IS_ERR(task)) {
4275		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4276 btrfs_warn(fs_info, "failed to start uuid_rescan task");
4277 up(&fs_info->uuid_tree_rescan_sem);
4278 return PTR_ERR(task);
4279 }
4280
4281 return 0;
4282}
4283
4284/*
4285 * Shrinking a device means finding all of the device extents past
4286 * the new size, and then following the back refs to the chunks.
4287 * The chunk relocation code actually frees the device extents.
4288 */
4289int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4290{
4291 struct btrfs_trans_handle *trans;
4292 struct btrfs_root *root = device->dev_root;
4293 struct btrfs_dev_extent *dev_extent = NULL;
4294 struct btrfs_path *path;
4295 u64 length;
4296 u64 chunk_offset;
4297 int ret;
4298 int slot;
4299 int failed = 0;
4300 bool retried = false;
4301 bool checked_pending_chunks = false;
4302 struct extent_buffer *l;
4303 struct btrfs_key key;
4304 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4305 u64 old_total = btrfs_super_total_bytes(super_copy);
4306 u64 old_size = btrfs_device_get_total_bytes(device);
4307 u64 diff = old_size - new_size;
4308
4309 if (device->is_tgtdev_for_dev_replace)
4310 return -EINVAL;
4311
4312 path = btrfs_alloc_path();
4313 if (!path)
4314 return -ENOMEM;
4315
4316 path->reada = READA_FORWARD;
4317
4318 lock_chunks(root);
4319
4320 btrfs_device_set_total_bytes(device, new_size);
4321 if (device->writeable) {
4322 device->fs_devices->total_rw_bytes -= diff;
4323 spin_lock(&root->fs_info->free_chunk_lock);
4324 root->fs_info->free_chunk_space -= diff;
4325 spin_unlock(&root->fs_info->free_chunk_lock);
4326 }
4327 unlock_chunks(root);
4328
4329again:
4330 key.objectid = device->devid;
4331 key.offset = (u64)-1;
4332 key.type = BTRFS_DEV_EXTENT_KEY;
4333
4334 do {
4335 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
4336 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4337 if (ret < 0) {
4338 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4339 goto done;
4340 }
4341
4342 ret = btrfs_previous_item(root, path, 0, key.type);
4343 if (ret)
4344 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4345 if (ret < 0)
4346 goto done;
4347 if (ret) {
4348 ret = 0;
4349 btrfs_release_path(path);
4350 break;
4351 }
4352
4353 l = path->nodes[0];
4354 slot = path->slots[0];
4355 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4356
4357 if (key.objectid != device->devid) {
4358 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4359 btrfs_release_path(path);
4360 break;
4361 }
4362
4363 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4364 length = btrfs_dev_extent_length(l, dev_extent);
4365
4366 if (key.offset + length <= new_size) {
4367 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4368 btrfs_release_path(path);
4369 break;
4370 }
4371
4372 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4373 btrfs_release_path(path);
4374
4375 ret = btrfs_relocate_chunk(root, chunk_offset);
4376 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4377 if (ret && ret != -ENOSPC)
4378 goto done;
4379 if (ret == -ENOSPC)
4380 failed++;
4381 } while (key.offset-- > 0);
4382
4383 if (failed && !retried) {
4384 failed = 0;
4385 retried = true;
4386 goto again;
4387 } else if (failed && retried) {
4388 ret = -ENOSPC;
4389 goto done;
4390 }
4391
4392 /* Shrinking succeeded, else we would be at "done". */
4393 trans = btrfs_start_transaction(root, 0);
4394 if (IS_ERR(trans)) {
4395 ret = PTR_ERR(trans);
4396 goto done;
4397 }
4398
4399 lock_chunks(root);
4400
4401 /*
4402 * We checked in the above loop all device extents that were already in
4403 * the device tree. However before we have updated the device's
4404 * total_bytes to the new size, we might have had chunk allocations that
4405	 * have not completed yet (new block groups attached to transaction
4406 * handles), and therefore their device extents were not yet in the
4407 * device tree and we missed them in the loop above. So if we have any
4408 * pending chunk using a device extent that overlaps the device range
4409	 * that we cannot use anymore, commit the current transaction and
4410 * repeat the search on the device tree - this way we guarantee we will
4411 * not have chunks using device extents that end beyond 'new_size'.
4412 */
4413 if (!checked_pending_chunks) {
4414 u64 start = new_size;
4415 u64 len = old_size - new_size;
4416
4417 if (contains_pending_extent(trans->transaction, device,
4418 &start, len)) {
4419 unlock_chunks(root);
4420 checked_pending_chunks = true;
4421 failed = 0;
4422 retried = false;
4423 ret = btrfs_commit_transaction(trans, root);
4424 if (ret)
4425 goto done;
4426 goto again;
4427 }
4428 }
4429
4430 btrfs_device_set_disk_total_bytes(device, new_size);
4431 if (list_empty(&device->resized_list))
4432 list_add_tail(&device->resized_list,
4433 &root->fs_info->fs_devices->resized_devices);
4434
4435 WARN_ON(diff > old_total);
4436 btrfs_set_super_total_bytes(super_copy, old_total - diff);
4437 unlock_chunks(root);
4438
4439 /* Now btrfs_update_device() will change the on-disk size. */
4440 ret = btrfs_update_device(trans, device);
4441 btrfs_end_transaction(trans, root);
4442done:
4443 btrfs_free_path(path);
4444 if (ret) {
4445 lock_chunks(root);
4446 btrfs_device_set_total_bytes(device, old_size);
4447 if (device->writeable)
4448 device->fs_devices->total_rw_bytes += diff;
4449 spin_lock(&root->fs_info->free_chunk_lock);
4450 root->fs_info->free_chunk_space += diff;
4451 spin_unlock(&root->fs_info->free_chunk_lock);
4452 unlock_chunks(root);
4453 }
4454 return ret;
4455}
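
/*
 * Illustrative user-space sketch, not part of this file: a shrink is
 * requested through BTRFS_IOC_RESIZE with a "<devid>:<amount>" string
 * and reaches btrfs_shrink_device() above when the new size is smaller
 * than the old one. Guarded out since it is user-space code; the mount
 * point and the "1:-2g" argument are assumptions (needs CAP_SYS_ADMIN).
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

static int shrink_devid1_by_2g(const char *mnt)
{
	struct btrfs_ioctl_vol_args args;
	int fd = open(mnt, O_RDONLY);
	int ret;

	if (fd < 0)
		return -1;
	memset(&args, 0, sizeof(args));
	strncpy(args.name, "1:-2g", BTRFS_PATH_NAME_MAX);
	ret = ioctl(fd, BTRFS_IOC_RESIZE, &args);
	close(fd);
	return ret;
}
#endif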
4456
4457static int btrfs_add_system_chunk(struct btrfs_root *root,
4458 struct btrfs_key *key,
4459 struct btrfs_chunk *chunk, int item_size)
4460{
4461 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4462 struct btrfs_disk_key disk_key;
4463 u32 array_size;
4464 u8 *ptr;
4465
4466 lock_chunks(root);
4467 array_size = btrfs_super_sys_array_size(super_copy);
4468 if (array_size + item_size + sizeof(disk_key)
4469 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4470 unlock_chunks(root);
4471 return -EFBIG;
4472 }
4473
4474 ptr = super_copy->sys_chunk_array + array_size;
4475 btrfs_cpu_key_to_disk(&disk_key, key);
4476 memcpy(ptr, &disk_key, sizeof(disk_key));
4477 ptr += sizeof(disk_key);
4478 memcpy(ptr, chunk, item_size);
4479 item_size += sizeof(disk_key);
4480 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4481 unlock_chunks(root);
4482
4483 return 0;
4484}
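
/*
 * Illustrative stand-alone sketch, not part of this file: the
 * sys_chunk_array in the super block is a packed sequence of
 * (disk key, chunk item) pairs appended exactly as above. A minimal
 * user-space model of that append; the struct and names are made up.
 */
#if 0
#include <string.h>

#define MODEL_ARRAY_MAX	2048	/* BTRFS_SYSTEM_CHUNK_ARRAY_SIZE */

struct model_disk_key { unsigned char raw[17]; };	/* packed key */

static int model_append_sys_chunk(unsigned char *array,
				  unsigned int *array_size,
				  const struct model_disk_key *key,
				  const void *chunk, unsigned int item_size)
{
	if (*array_size + sizeof(*key) + item_size > MODEL_ARRAY_MAX)
		return -1;	/* the kernel returns -EFBIG here */
	memcpy(array + *array_size, key, sizeof(*key));
	memcpy(array + *array_size + sizeof(*key), chunk, item_size);
	*array_size += sizeof(*key) + item_size;
	return 0;
}
#endif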
4485
4486/*
4487 * sort the devices in descending order by max_avail, total_avail
4488 */
4489static int btrfs_cmp_device_info(const void *a, const void *b)
4490{
4491 const struct btrfs_device_info *di_a = a;
4492 const struct btrfs_device_info *di_b = b;
4493
4494 if (di_a->max_avail > di_b->max_avail)
4495 return -1;
4496 if (di_a->max_avail < di_b->max_avail)
4497 return 1;
4498 if (di_a->total_avail > di_b->total_avail)
4499 return -1;
4500 if (di_a->total_avail < di_b->total_avail)
4501 return 1;
4502 return 0;
4503}
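
/*
 * Illustrative stand-alone sketch, not part of this file: the comparator
 * above gives a descending sort by largest free hole, then by total free
 * space. The same ordering with user-space qsort() and made-up numbers:
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct model_dev_info { unsigned long long max_avail, total_avail; };

static int model_cmp_desc(const void *a, const void *b)
{
	const struct model_dev_info *da = a, *db = b;

	if (da->max_avail != db->max_avail)
		return da->max_avail > db->max_avail ? -1 : 1;
	if (da->total_avail != db->total_avail)
		return da->total_avail > db->total_avail ? -1 : 1;
	return 0;
}

int main(void)
{
	struct model_dev_info di[3] = { {10, 50}, {40, 40}, {10, 90} };
	int i;

	qsort(di, 3, sizeof(di[0]), model_cmp_desc);
	for (i = 0; i < 3; i++)	/* prints 40/40, then 10/90, then 10/50 */
		printf("%llu/%llu\n", di[i].max_avail, di[i].total_avail);
	return 0;
}
#endif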
4504
4505static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
4506{
4507 /* TODO allow them to set a preferred stripe size */
4508 return SZ_64K;
4509}
4510
4511static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4512{
4513 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4514 return;
4515
4516 btrfs_set_fs_incompat(info, RAID56);
4517}
4518
4519#define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r) \
4520 - sizeof(struct btrfs_item) \
4521 - sizeof(struct btrfs_chunk)) \
4522 / sizeof(struct btrfs_stripe) + 1)
4523
4524#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE \
4525 - 2 * sizeof(struct btrfs_disk_key) \
4526 - 2 * sizeof(struct btrfs_chunk)) \
4527 / sizeof(struct btrfs_stripe) + 1)
4528
4529static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4530 struct btrfs_root *extent_root, u64 start,
4531 u64 type)
4532{
4533 struct btrfs_fs_info *info = extent_root->fs_info;
4534 struct btrfs_fs_devices *fs_devices = info->fs_devices;
4535 struct list_head *cur;
4536 struct map_lookup *map = NULL;
4537 struct extent_map_tree *em_tree;
4538 struct extent_map *em;
4539 struct btrfs_device_info *devices_info = NULL;
4540 u64 total_avail;
4541 int num_stripes; /* total number of stripes to allocate */
4542 int data_stripes; /* number of stripes that count for
4543 block group size */
4544 int sub_stripes; /* sub_stripes info for map */
4545 int dev_stripes; /* stripes per dev */
4546 int devs_max; /* max devs to use */
4547 int devs_min; /* min devs needed */
4548 int devs_increment; /* ndevs has to be a multiple of this */
4549	int ncopies;	/* how many copies of the data we have */
4550 int ret;
4551 u64 max_stripe_size;
4552 u64 max_chunk_size;
4553 u64 stripe_size;
4554 u64 num_bytes;
4555 u64 raid_stripe_len = BTRFS_STRIPE_LEN;
4556 int ndevs;
4557 int i;
4558 int j;
4559 int index;
4560
4561 BUG_ON(!alloc_profile_is_valid(type, 0));
4562
4563 if (list_empty(&fs_devices->alloc_list))
4564 return -ENOSPC;
4565
4566 index = __get_raid_index(type);
4567
4568 sub_stripes = btrfs_raid_array[index].sub_stripes;
4569 dev_stripes = btrfs_raid_array[index].dev_stripes;
4570 devs_max = btrfs_raid_array[index].devs_max;
4571 devs_min = btrfs_raid_array[index].devs_min;
4572 devs_increment = btrfs_raid_array[index].devs_increment;
4573 ncopies = btrfs_raid_array[index].ncopies;
4574
4575 if (type & BTRFS_BLOCK_GROUP_DATA) {
4576 max_stripe_size = SZ_1G;
4577 max_chunk_size = 10 * max_stripe_size;
4578 if (!devs_max)
4579 devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4580 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4581 /* for larger filesystems, use larger metadata chunks */
4582 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
4583 max_stripe_size = SZ_1G;
4584 else
4585 max_stripe_size = SZ_256M;
4586 max_chunk_size = max_stripe_size;
4587 if (!devs_max)
4588 devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4589 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4590 max_stripe_size = SZ_32M;
4591 max_chunk_size = 2 * max_stripe_size;
4592 if (!devs_max)
4593 devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
4594 } else {
4595 btrfs_err(info, "invalid chunk type 0x%llx requested",
4596 type);
4597 BUG_ON(1);
4598 }
4599
4600 /* we don't want a chunk larger than 10% of writeable space */
4601 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4602 max_chunk_size);
4603
4604 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
4605 GFP_NOFS);
4606 if (!devices_info)
4607 return -ENOMEM;
4608
4609 cur = fs_devices->alloc_list.next;
4610
4611 /*
4612 * in the first pass through the devices list, we gather information
4613 * about the available holes on each device.
4614 */
4615 ndevs = 0;
4616 while (cur != &fs_devices->alloc_list) {
4617 struct btrfs_device *device;
4618 u64 max_avail;
4619 u64 dev_offset;
4620
4621 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
4622
4623 cur = cur->next;
4624
4625 if (!device->writeable) {
4626 WARN(1, KERN_ERR
4627 "BTRFS: read-only device in alloc_list\n");
4628 continue;
4629 }
4630
4631 if (!device->in_fs_metadata ||
4632 device->is_tgtdev_for_dev_replace)
4633 continue;
4634
4635 if (device->total_bytes > device->bytes_used)
4636 total_avail = device->total_bytes - device->bytes_used;
4637 else
4638 total_avail = 0;
4639
4640 /* If there is no space on this device, skip it. */
4641 if (total_avail == 0)
4642 continue;
4643
4644 ret = find_free_dev_extent(trans, device,
4645 max_stripe_size * dev_stripes,
4646 &dev_offset, &max_avail);
4647 if (ret && ret != -ENOSPC)
4648 goto error;
4649
4650 if (ret == 0)
4651 max_avail = max_stripe_size * dev_stripes;
4652
4653 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
4654 continue;
4655
4656 if (ndevs == fs_devices->rw_devices) {
4657 WARN(1, "%s: found more than %llu devices\n",
4658 __func__, fs_devices->rw_devices);
4659 break;
4660 }
4661 devices_info[ndevs].dev_offset = dev_offset;
4662 devices_info[ndevs].max_avail = max_avail;
4663 devices_info[ndevs].total_avail = total_avail;
4664 devices_info[ndevs].dev = device;
4665 ++ndevs;
4666 }
4667
4668 /*
4669 * now sort the devices by hole size / available space
4670 */
4671 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4672 btrfs_cmp_device_info, NULL);
4673
4674 /* round down to number of usable stripes */
4675 ndevs -= ndevs % devs_increment;
4676
4677 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
4678 ret = -ENOSPC;
4679 goto error;
4680 }
4681
4682 if (devs_max && ndevs > devs_max)
4683 ndevs = devs_max;
4684 /*
4685 * the primary goal is to maximize the number of stripes, so use as many
4686 * devices as possible, even if the stripes are not maximum sized.
4687 */
4688 stripe_size = devices_info[ndevs-1].max_avail;
4689 num_stripes = ndevs * dev_stripes;
4690
4691 /*
4692 * this will have to be fixed for RAID1 and RAID10 over
4693 * more drives
4694 */
4695 data_stripes = num_stripes / ncopies;
4696
4697 if (type & BTRFS_BLOCK_GROUP_RAID5) {
4698 raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
4699 btrfs_super_stripesize(info->super_copy));
4700 data_stripes = num_stripes - 1;
4701 }
4702 if (type & BTRFS_BLOCK_GROUP_RAID6) {
4703 raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
4704 btrfs_super_stripesize(info->super_copy));
4705 data_stripes = num_stripes - 2;
4706 }
4707
4708 /*
4709 * Use the number of data stripes to figure out how big this chunk
4710 * is really going to be in terms of logical address space,
4711 * and compare that answer with the max chunk size
4712 */
4713 if (stripe_size * data_stripes > max_chunk_size) {
4714 u64 mask = (1ULL << 24) - 1;
4715
4716 stripe_size = div_u64(max_chunk_size, data_stripes);
4717
4718 /* bump the answer up to a 16MB boundary */
4719 stripe_size = (stripe_size + mask) & ~mask;
4720
4721 /* but don't go higher than the limits we found
4722 * while searching for free extents
4723 */
4724 if (stripe_size > devices_info[ndevs-1].max_avail)
4725 stripe_size = devices_info[ndevs-1].max_avail;
4726 }
4727
4728 stripe_size = div_u64(stripe_size, dev_stripes);
4729
4730 /* align to BTRFS_STRIPE_LEN */
4731 stripe_size = div_u64(stripe_size, raid_stripe_len);
4732 stripe_size *= raid_stripe_len;
4733
4734 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4735 if (!map) {
4736 ret = -ENOMEM;
4737 goto error;
4738 }
4739 map->num_stripes = num_stripes;
4740
4741 for (i = 0; i < ndevs; ++i) {
4742 for (j = 0; j < dev_stripes; ++j) {
4743 int s = i * dev_stripes + j;
4744 map->stripes[s].dev = devices_info[i].dev;
4745 map->stripes[s].physical = devices_info[i].dev_offset +
4746 j * stripe_size;
4747 }
4748 }
4749 map->sector_size = extent_root->sectorsize;
4750 map->stripe_len = raid_stripe_len;
4751 map->io_align = raid_stripe_len;
4752 map->io_width = raid_stripe_len;
4753 map->type = type;
4754 map->sub_stripes = sub_stripes;
4755
4756 num_bytes = stripe_size * data_stripes;
4757
4758 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
4759
4760 em = alloc_extent_map();
4761 if (!em) {
4762 kfree(map);
4763 ret = -ENOMEM;
4764 goto error;
4765 }
4766 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
4767 em->map_lookup = map;
4768 em->start = start;
4769 em->len = num_bytes;
4770 em->block_start = 0;
4771 em->block_len = em->len;
4772 em->orig_block_len = stripe_size;
4773
4774 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4775 write_lock(&em_tree->lock);
4776 ret = add_extent_mapping(em_tree, em, 0);
4777 if (!ret) {
4778 list_add_tail(&em->list, &trans->transaction->pending_chunks);
4779 atomic_inc(&em->refs);
4780 }
4781 write_unlock(&em_tree->lock);
4782 if (ret) {
4783 free_extent_map(em);
4784 goto error;
4785 }
4786
4787 ret = btrfs_make_block_group(trans, extent_root, 0, type,
4788 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4789 start, num_bytes);
4790 if (ret)
4791 goto error_del_extent;
4792
4793 for (i = 0; i < map->num_stripes; i++) {
4794 num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
4795 btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
4796 }
4797
4798 spin_lock(&extent_root->fs_info->free_chunk_lock);
4799 extent_root->fs_info->free_chunk_space -= (stripe_size *
4800 map->num_stripes);
4801 spin_unlock(&extent_root->fs_info->free_chunk_lock);
4802
4803 free_extent_map(em);
4804 check_raid56_incompat_flag(extent_root->fs_info, type);
4805
4806 kfree(devices_info);
4807 return 0;
4808
4809error_del_extent:
4810 write_lock(&em_tree->lock);
4811 remove_extent_mapping(em_tree, em);
4812 write_unlock(&em_tree->lock);
4813
4814 /* One for our allocation */
4815 free_extent_map(em);
4816 /* One for the tree reference */
4817 free_extent_map(em);
4818 /* One for the pending_chunks list reference */
4819 free_extent_map(em);
4820error:
4821 kfree(devices_info);
4822 return ret;
4823}
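
/*
 * Illustrative stand-alone sketch, not part of this file: the per-device
 * stripe sizing above in condensed form - clamp to the chunk size limit,
 * round up to a 16MiB boundary, cap at the smallest selected device's
 * hole, split across dev_stripes, then align down to the 64KiB
 * BTRFS_STRIPE_LEN. Names and the example numbers are made up.
 */
#if 0
#include <stdio.h>

#define MODEL_STRIPE_LEN	(64ULL * 1024)

static unsigned long long model_stripe_size(unsigned long long stripe_size,
					    unsigned long long max_chunk_size,
					    unsigned long long smallest_hole,
					    int data_stripes, int dev_stripes)
{
	unsigned long long mask = (1ULL << 24) - 1;	/* 16MiB - 1 */

	if (stripe_size * data_stripes > max_chunk_size) {
		stripe_size = max_chunk_size / data_stripes;
		stripe_size = (stripe_size + mask) & ~mask;
		if (stripe_size > smallest_hole)
			stripe_size = smallest_hole;
	}
	stripe_size /= dev_stripes;
	return stripe_size / MODEL_STRIPE_LEN * MODEL_STRIPE_LEN;
}

int main(void)
{
	/* 3 data stripes against a 1GiB chunk limit and huge holes */
	printf("%llu\n", model_stripe_size(2ULL << 30, 1ULL << 30,
					   1ULL << 40, 3, 1));
	return 0;
}
#endif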
4824
4825int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4826 struct btrfs_root *extent_root,
4827 u64 chunk_offset, u64 chunk_size)
4828{
4829 struct btrfs_key key;
4830 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4831 struct btrfs_device *device;
4832 struct btrfs_chunk *chunk;
4833 struct btrfs_stripe *stripe;
4834 struct extent_map_tree *em_tree;
4835 struct extent_map *em;
4836 struct map_lookup *map;
4837 size_t item_size;
4838 u64 dev_offset;
4839 u64 stripe_size;
4840 int i = 0;
4841 int ret = 0;
4842
4843 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4844 read_lock(&em_tree->lock);
4845 em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
4846 read_unlock(&em_tree->lock);
4847
4848 if (!em) {
4849 btrfs_crit(extent_root->fs_info, "unable to find logical "
4850 "%Lu len %Lu", chunk_offset, chunk_size);
4851 return -EINVAL;
4852 }
4853
4854 if (em->start != chunk_offset || em->len != chunk_size) {
4855 btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted"
4856 " %Lu-%Lu, found %Lu-%Lu", chunk_offset,
4857 chunk_size, em->start, em->len);
4858 free_extent_map(em);
4859 return -EINVAL;
4860 }
4861
4862 map = em->map_lookup;
4863 item_size = btrfs_chunk_item_size(map->num_stripes);
4864 stripe_size = em->orig_block_len;
4865
4866 chunk = kzalloc(item_size, GFP_NOFS);
4867 if (!chunk) {
4868 ret = -ENOMEM;
4869 goto out;
4870 }
4871
4872 /*
4873 * Take the device list mutex to prevent races with the final phase of
4874 * a device replace operation that replaces the device object associated
4875 * with the map's stripes, because the device object's id can change
4876 * at any time during that final phase of the device replace operation
4877 * (dev-replace.c:btrfs_dev_replace_finishing()).
4878 */
4879 mutex_lock(&chunk_root->fs_info->fs_devices->device_list_mutex);
4880 for (i = 0; i < map->num_stripes; i++) {
4881 device = map->stripes[i].dev;
4882 dev_offset = map->stripes[i].physical;
4883
4884 ret = btrfs_update_device(trans, device);
4885 if (ret)
4886 break;
4887 ret = btrfs_alloc_dev_extent(trans, device,
4888 chunk_root->root_key.objectid,
4889 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4890 chunk_offset, dev_offset,
4891 stripe_size);
4892 if (ret)
4893 break;
4894 }
4895 if (ret) {
4896 mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
4897 goto out;
4898 }
4899
4900 stripe = &chunk->stripe;
4901 for (i = 0; i < map->num_stripes; i++) {
4902 device = map->stripes[i].dev;
4903 dev_offset = map->stripes[i].physical;
4904
4905 btrfs_set_stack_stripe_devid(stripe, device->devid);
4906 btrfs_set_stack_stripe_offset(stripe, dev_offset);
4907 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4908 stripe++;
4909 }
4910 mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
4911
4912 btrfs_set_stack_chunk_length(chunk, chunk_size);
4913 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4914 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4915 btrfs_set_stack_chunk_type(chunk, map->type);
4916 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4917 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4918 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4919 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4920 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4921
4922 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4923 key.type = BTRFS_CHUNK_ITEM_KEY;
4924 key.offset = chunk_offset;
4925
4926 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4927 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4928 /*
4929 * TODO: Cleanup of inserted chunk root in case of
4930 * failure.
4931 */
4932 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4933 item_size);
4934 }
4935
4936out:
4937 kfree(chunk);
4938 free_extent_map(em);
4939 return ret;
4940}
4941
4942/*
4943 * Chunk allocation falls into two parts. The first part does the work
4944 * that makes the newly allocated chunk usable, but does not do any
4945 * operation that modifies the chunk tree. The second part does the work
4946 * that requires modifying the chunk tree. This division is important for the
4947 * bootstrap process of adding storage to a seed btrfs.
4948 */
4949int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4950 struct btrfs_root *extent_root, u64 type)
4951{
4952 u64 chunk_offset;
4953
4954 ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
4955 chunk_offset = find_next_chunk(extent_root->fs_info);
4956 return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
4957}
4958
4959static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4960 struct btrfs_root *root,
4961 struct btrfs_device *device)
4962{
4963 u64 chunk_offset;
4964 u64 sys_chunk_offset;
4965 u64 alloc_profile;
4966 struct btrfs_fs_info *fs_info = root->fs_info;
4967 struct btrfs_root *extent_root = fs_info->extent_root;
4968 int ret;
4969
4970 chunk_offset = find_next_chunk(fs_info);
4971 alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4972 ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
4973 alloc_profile);
4974 if (ret)
4975 return ret;
4976
4977 sys_chunk_offset = find_next_chunk(root->fs_info);
4978 alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4979 ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
4980 alloc_profile);
4981 return ret;
4982}
4983
4984static inline int btrfs_chunk_max_errors(struct map_lookup *map)
4985{
4986 int max_errors;
4987
4988 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4989 BTRFS_BLOCK_GROUP_RAID10 |
4990 BTRFS_BLOCK_GROUP_RAID5 |
4991 BTRFS_BLOCK_GROUP_DUP)) {
4992 max_errors = 1;
4993 } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
4994 max_errors = 2;
4995 } else {
4996 max_errors = 0;
4997 }
4998
4999 return max_errors;
5000}
5001
5002int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
5003{
5004 struct extent_map *em;
5005 struct map_lookup *map;
5006 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
5007 int readonly = 0;
5008 int miss_ndevs = 0;
5009 int i;
5010
5011 read_lock(&map_tree->map_tree.lock);
5012 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
5013 read_unlock(&map_tree->map_tree.lock);
5014 if (!em)
5015 return 1;
5016
5017 map = em->map_lookup;
5018 for (i = 0; i < map->num_stripes; i++) {
5019 if (map->stripes[i].dev->missing) {
5020 miss_ndevs++;
5021 continue;
5022 }
5023
5024 if (!map->stripes[i].dev->writeable) {
5025 readonly = 1;
5026 goto end;
5027 }
5028 }
5029
5030 /*
5031 * If the number of missing devices is larger than max errors,
5032	 * we cannot write the data into that chunk successfully, so
5033 * set it readonly.
5034 */
5035 if (miss_ndevs > btrfs_chunk_max_errors(map))
5036 readonly = 1;
5037end:
5038 free_extent_map(em);
5039 return readonly;
5040}
5041
5042void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
5043{
5044 extent_map_tree_init(&tree->map_tree);
5045}
5046
5047void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
5048{
5049 struct extent_map *em;
5050
5051 while (1) {
5052 write_lock(&tree->map_tree.lock);
5053 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
5054 if (em)
5055 remove_extent_mapping(&tree->map_tree, em);
5056 write_unlock(&tree->map_tree.lock);
5057 if (!em)
5058 break;
5059 /* once for us */
5060 free_extent_map(em);
5061 /* once for the tree */
5062 free_extent_map(em);
5063 }
5064}
5065
5066int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5067{
5068 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
5069 struct extent_map *em;
5070 struct map_lookup *map;
5071 struct extent_map_tree *em_tree = &map_tree->map_tree;
5072 int ret;
5073
5074 read_lock(&em_tree->lock);
5075 em = lookup_extent_mapping(em_tree, logical, len);
5076 read_unlock(&em_tree->lock);
5077
5078 /*
5079 * We could return errors for these cases, but that could get ugly and
5080	 * we'd probably do the same thing anyway, which is just to do nothing
5081 * and exit, so return 1 so the callers don't try to use other copies.
5082 */
5083 if (!em) {
5084 btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
5085 logical+len);
5086 return 1;
5087 }
5088
5089 if (em->start > logical || em->start + em->len < logical) {
5090 btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
5091 "%Lu-%Lu", logical, logical+len, em->start,
5092 em->start + em->len);
5093 free_extent_map(em);
5094 return 1;
5095 }
5096
5097 map = em->map_lookup;
5098 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
5099 ret = map->num_stripes;
5100 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5101 ret = map->sub_stripes;
5102 else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5103 ret = 2;
5104 else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5105 ret = 3;
5106 else
5107 ret = 1;
5108 free_extent_map(em);
5109
5110 btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
5111 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
5112 ret++;
5113 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
5114
5115 return ret;
5116}
5117
5118unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
5119 struct btrfs_mapping_tree *map_tree,
5120 u64 logical)
5121{
5122 struct extent_map *em;
5123 struct map_lookup *map;
5124 struct extent_map_tree *em_tree = &map_tree->map_tree;
5125 unsigned long len = root->sectorsize;
5126
5127 read_lock(&em_tree->lock);
5128 em = lookup_extent_mapping(em_tree, logical, len);
5129 read_unlock(&em_tree->lock);
5130 BUG_ON(!em);
5131
5132 BUG_ON(em->start > logical || em->start + em->len < logical);
5133 map = em->map_lookup;
5134 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5135 len = map->stripe_len * nr_data_stripes(map);
5136 free_extent_map(em);
5137 return len;
5138}
5139
5140int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
5141 u64 logical, u64 len, int mirror_num)
5142{
5143 struct extent_map *em;
5144 struct map_lookup *map;
5145 struct extent_map_tree *em_tree = &map_tree->map_tree;
5146 int ret = 0;
5147
5148 read_lock(&em_tree->lock);
5149 em = lookup_extent_mapping(em_tree, logical, len);
5150 read_unlock(&em_tree->lock);
5151 BUG_ON(!em);
5152
5153 BUG_ON(em->start > logical || em->start + em->len < logical);
5154 map = em->map_lookup;
5155 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5156 ret = 1;
5157 free_extent_map(em);
5158 return ret;
5159}
5160
5161static int find_live_mirror(struct btrfs_fs_info *fs_info,
5162 struct map_lookup *map, int first, int num,
5163 int optimal, int dev_replace_is_ongoing)
5164{
5165 int i;
5166 int tolerance;
5167 struct btrfs_device *srcdev;
5168
5169 if (dev_replace_is_ongoing &&
5170 fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5171 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5172 srcdev = fs_info->dev_replace.srcdev;
5173 else
5174 srcdev = NULL;
5175
5176 /*
5177 * try to avoid the drive that is the source drive for a
5178	 * dev-replace procedure; only choose it if no other non-missing
5179 * mirror is available
5180 */
5181 for (tolerance = 0; tolerance < 2; tolerance++) {
5182 if (map->stripes[optimal].dev->bdev &&
5183 (tolerance || map->stripes[optimal].dev != srcdev))
5184 return optimal;
5185 for (i = first; i < first + num; i++) {
5186 if (map->stripes[i].dev->bdev &&
5187 (tolerance || map->stripes[i].dev != srcdev))
5188 return i;
5189 }
5190 }
5191
5192 /* we couldn't find one that doesn't fail. Just return something
5193	 * and the I/O error handling code will clean up eventually
5194 */
5195 return optimal;
5196}
5197
5198static inline int parity_smaller(u64 a, u64 b)
5199{
5200 return a > b;
5201}
5202
5203/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5204static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5205{
5206 struct btrfs_bio_stripe s;
5207 int i;
5208 u64 l;
5209 int again = 1;
5210
5211 while (again) {
5212 again = 0;
5213 for (i = 0; i < num_stripes - 1; i++) {
5214 if (parity_smaller(bbio->raid_map[i],
5215 bbio->raid_map[i+1])) {
5216 s = bbio->stripes[i];
5217 l = bbio->raid_map[i];
5218 bbio->stripes[i] = bbio->stripes[i+1];
5219 bbio->raid_map[i] = bbio->raid_map[i+1];
5220 bbio->stripes[i+1] = s;
5221 bbio->raid_map[i+1] = l;
5222
5223 again = 1;
5224 }
5225 }
5226 }
5227}
5228
5229static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5230{
5231 struct btrfs_bio *bbio = kzalloc(
5232 /* the size of the btrfs_bio */
5233 sizeof(struct btrfs_bio) +
5234 /* plus the variable array for the stripes */
5235 sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5236 /* plus the variable array for the tgt dev */
5237 sizeof(int) * (real_stripes) +
5238 /*
5239 * plus the raid_map, which includes both the tgt dev
5240 * and the stripes
5241 */
5242 sizeof(u64) * (total_stripes),
5243 GFP_NOFS|__GFP_NOFAIL);
5244
5245 atomic_set(&bbio->error, 0);
5246 atomic_set(&bbio->refs, 1);
5247
5248 return bbio;
5249}
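
/*
 * Illustrative stand-alone sketch, not part of this file: the single
 * allocation above packs four regions back to back, and later code
 * derives the tgtdev_map and raid_map pointers from these offsets.
 * The structure sizes here are stand-ins, not the kernel's.
 */
#if 0
#include <stdio.h>

struct model_bbio { char hdr[128]; };		/* struct btrfs_bio */
struct model_stripe { char s[32]; };		/* struct btrfs_bio_stripe */

int main(void)
{
	int total_stripes = 5, real_stripes = 5;
	size_t off_stripes = sizeof(struct model_bbio);
	size_t off_tgtdev = off_stripes +
			    sizeof(struct model_stripe) * total_stripes;
	size_t off_raid_map = off_tgtdev + sizeof(int) * real_stripes;
	size_t total = off_raid_map +
		       sizeof(unsigned long long) * total_stripes;

	printf("stripes@%zu tgtdev_map@%zu raid_map@%zu total=%zu\n",
	       off_stripes, off_tgtdev, off_raid_map, total);
	return 0;
}
#endif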
5250
5251void btrfs_get_bbio(struct btrfs_bio *bbio)
5252{
5253 WARN_ON(!atomic_read(&bbio->refs));
5254 atomic_inc(&bbio->refs);
5255}
5256
5257void btrfs_put_bbio(struct btrfs_bio *bbio)
5258{
5259 if (!bbio)
5260 return;
5261 if (atomic_dec_and_test(&bbio->refs))
5262 kfree(bbio);
5263}
5264
5265static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5266 u64 logical, u64 *length,
5267 struct btrfs_bio **bbio_ret,
5268 int mirror_num, int need_raid_map)
5269{
5270 struct extent_map *em;
5271 struct map_lookup *map;
5272 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
5273 struct extent_map_tree *em_tree = &map_tree->map_tree;
5274 u64 offset;
5275 u64 stripe_offset;
5276 u64 stripe_end_offset;
5277 u64 stripe_nr;
5278 u64 stripe_nr_orig;
5279 u64 stripe_nr_end;
5280 u64 stripe_len;
5281 u32 stripe_index;
5282 int i;
5283 int ret = 0;
5284 int num_stripes;
5285 int max_errors = 0;
5286 int tgtdev_indexes = 0;
5287 struct btrfs_bio *bbio = NULL;
5288 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
5289 int dev_replace_is_ongoing = 0;
5290 int num_alloc_stripes;
5291 int patch_the_first_stripe_for_dev_replace = 0;
5292 u64 physical_to_patch_in_first_stripe = 0;
5293 u64 raid56_full_stripe_start = (u64)-1;
5294
5295 read_lock(&em_tree->lock);
5296 em = lookup_extent_mapping(em_tree, logical, *length);
5297 read_unlock(&em_tree->lock);
5298
5299 if (!em) {
5300 btrfs_crit(fs_info, "unable to find logical %llu len %llu",
5301 logical, *length);
5302 return -EINVAL;
5303 }
5304
5305 if (em->start > logical || em->start + em->len < logical) {
5306 btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
5307 "found %Lu-%Lu", logical, em->start,
5308 em->start + em->len);
5309 free_extent_map(em);
5310 return -EINVAL;
5311 }
5312
5313 map = em->map_lookup;
5314 offset = logical - em->start;
5315
5316 stripe_len = map->stripe_len;
5317 stripe_nr = offset;
5318 /*
5319 * stripe_nr counts the total number of stripes we have to stride
5320 * to get to this block
5321 */
5322 stripe_nr = div64_u64(stripe_nr, stripe_len);
5323
5324 stripe_offset = stripe_nr * stripe_len;
5325 BUG_ON(offset < stripe_offset);
5326
5327	/* stripe_offset is the offset of this block in its stripe */
5328 stripe_offset = offset - stripe_offset;
5329
5330 /* if we're here for raid56, we need to know the stripe aligned start */
5331 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5332 unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
5333 raid56_full_stripe_start = offset;
5334
5335 /* allow a write of a full stripe, but make sure we don't
5336 * allow straddling of stripes
5337 */
5338 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
5339 full_stripe_len);
5340 raid56_full_stripe_start *= full_stripe_len;
5341 }
5342
5343 if (rw & REQ_DISCARD) {
5344 /* we don't discard raid56 yet */
5345 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5346 ret = -EOPNOTSUPP;
5347 goto out;
5348 }
5349 *length = min_t(u64, em->len - offset, *length);
5350 } else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
5351 u64 max_len;
5352 /* For writes to RAID[56], allow a full stripeset across all disks.
5353 For other RAID types and for RAID[56] reads, just allow a single
5354 stripe (on a single disk). */
5355 if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
5356 (rw & REQ_WRITE)) {
5357 max_len = stripe_len * nr_data_stripes(map) -
5358 (offset - raid56_full_stripe_start);
5359 } else {
5360 /* we limit the length of each bio to what fits in a stripe */
5361 max_len = stripe_len - stripe_offset;
5362 }
5363 *length = min_t(u64, em->len - offset, max_len);
5364 } else {
5365 *length = em->len - offset;
5366 }
5367
5368 /* This is for when we're called from btrfs_merge_bio_hook() and all
5369 it cares about is the length */
5370 if (!bbio_ret)
5371 goto out;
5372
5373 btrfs_dev_replace_lock(dev_replace, 0);
5374 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
5375 if (!dev_replace_is_ongoing)
5376 btrfs_dev_replace_unlock(dev_replace, 0);
5377 else
5378 btrfs_dev_replace_set_lock_blocking(dev_replace);
5379
5380 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
5381 !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
5382 dev_replace->tgtdev != NULL) {
5383 /*
5384	 * in the dev-replace case, for the repair case (that's the only
5385 * case where the mirror is selected explicitly when
5386 * calling btrfs_map_block), blocks left of the left cursor
5387 * can also be read from the target drive.
5388 * For REQ_GET_READ_MIRRORS, the target drive is added as
5389 * the last one to the array of stripes. For READ, it also
5390 * needs to be supported using the same mirror number.
5391 * If the requested block is not left of the left cursor,
5392 * EIO is returned. This can happen because btrfs_num_copies()
5393 * returns one more in the dev-replace case.
5394 */
5395 u64 tmp_length = *length;
5396 struct btrfs_bio *tmp_bbio = NULL;
5397 int tmp_num_stripes;
5398 u64 srcdev_devid = dev_replace->srcdev->devid;
5399 int index_srcdev = 0;
5400 int found = 0;
5401 u64 physical_of_found = 0;
5402
5403 ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
5404 logical, &tmp_length, &tmp_bbio, 0, 0);
5405 if (ret) {
5406 WARN_ON(tmp_bbio != NULL);
5407 goto out;
5408 }
5409
5410 tmp_num_stripes = tmp_bbio->num_stripes;
5411 if (mirror_num > tmp_num_stripes) {
5412 /*
5413	 * mirror, which means that the requested area
5414 * mirror, that means that the requested area
5415 * is not left of the left cursor
5416 */
5417 ret = -EIO;
5418 btrfs_put_bbio(tmp_bbio);
5419 goto out;
5420 }
5421
5422 /*
5423 * process the rest of the function using the mirror_num
5424	 * At the end, patch the device pointer to that of the
5425 * At the end, patch the device pointer to the one of the
5426 * target drive.
5427 */
5428 for (i = 0; i < tmp_num_stripes; i++) {
5429 if (tmp_bbio->stripes[i].dev->devid != srcdev_devid)
5430 continue;
5431
5432 /*
5433 * In case of DUP, in order to keep it simple, only add
5434 * the mirror with the lowest physical address
5435 */
5436 if (found &&
5437 physical_of_found <= tmp_bbio->stripes[i].physical)
5438 continue;
5439
5440 index_srcdev = i;
5441 found = 1;
5442 physical_of_found = tmp_bbio->stripes[i].physical;
5443 }
5444
5445 btrfs_put_bbio(tmp_bbio);
5446
5447 if (!found) {
5448 WARN_ON(1);
5449 ret = -EIO;
5450 goto out;
5451 }
5452
5453 mirror_num = index_srcdev + 1;
5454 patch_the_first_stripe_for_dev_replace = 1;
5455 physical_to_patch_in_first_stripe = physical_of_found;
5456 } else if (mirror_num > map->num_stripes) {
5457 mirror_num = 0;
5458 }
5459
5460 num_stripes = 1;
5461 stripe_index = 0;
5462 stripe_nr_orig = stripe_nr;
5463 stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
5464 stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len);
5465 stripe_end_offset = stripe_nr_end * map->stripe_len -
5466 (offset + *length);
5467
5468 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5469 if (rw & REQ_DISCARD)
5470 num_stripes = min_t(u64, map->num_stripes,
5471 stripe_nr_end - stripe_nr_orig);
5472 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5473 &stripe_index);
5474 if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
5475 mirror_num = 1;
5476 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
5477 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
5478 num_stripes = map->num_stripes;
5479 else if (mirror_num)
5480 stripe_index = mirror_num - 1;
5481 else {
5482 stripe_index = find_live_mirror(fs_info, map, 0,
5483 map->num_stripes,
5484 current->pid % map->num_stripes,
5485 dev_replace_is_ongoing);
5486 mirror_num = stripe_index + 1;
5487 }
5488
5489 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
5490 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
5491 num_stripes = map->num_stripes;
5492 } else if (mirror_num) {
5493 stripe_index = mirror_num - 1;
5494 } else {
5495 mirror_num = 1;
5496 }
5497
5498 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5499 u32 factor = map->num_stripes / map->sub_stripes;
5500
5501 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5502 stripe_index *= map->sub_stripes;
5503
5504 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
5505 num_stripes = map->sub_stripes;
5506 else if (rw & REQ_DISCARD)
5507 num_stripes = min_t(u64, map->sub_stripes *
5508 (stripe_nr_end - stripe_nr_orig),
5509 map->num_stripes);
5510 else if (mirror_num)
5511 stripe_index += mirror_num - 1;
5512 else {
5513 int old_stripe_index = stripe_index;
5514 stripe_index = find_live_mirror(fs_info, map,
5515 stripe_index,
5516 map->sub_stripes, stripe_index +
5517 current->pid % map->sub_stripes,
5518 dev_replace_is_ongoing);
5519 mirror_num = stripe_index - old_stripe_index + 1;
5520 }
5521
5522 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5523 if (need_raid_map &&
5524 ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
5525 mirror_num > 1)) {
5526 /* push stripe_nr back to the start of the full stripe */
5527 stripe_nr = div_u64(raid56_full_stripe_start,
5528 stripe_len * nr_data_stripes(map));
5529
5530 /* RAID[56] write or recovery. Return all stripes */
5531 num_stripes = map->num_stripes;
5532 max_errors = nr_parity_stripes(map);
5533
5534			*length = map->stripe_len * nr_data_stripes(map);
5535 stripe_index = 0;
5536 stripe_offset = 0;
5537 } else {
5538 /*
5539 * Mirror #0 or #1 means the original data block.
5540 * Mirror #2 is RAID5 parity block.
5541 * Mirror #3 is RAID6 Q block.
5542 */
5543 stripe_nr = div_u64_rem(stripe_nr,
5544 nr_data_stripes(map), &stripe_index);
5545 if (mirror_num > 1)
5546 stripe_index = nr_data_stripes(map) +
5547 mirror_num - 2;
5548
5549 /* We distribute the parity blocks across stripes */
5550 div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
5551 &stripe_index);
5552 if (!(rw & (REQ_WRITE | REQ_DISCARD |
5553 REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
5554 mirror_num = 1;
5555 }
5556 } else {
5557 /*
5558 * after this, stripe_nr is the number of stripes on this
5559 * device we have to walk to find the data, and stripe_index is
5560 * the number of our device in the stripe array
5561 */
5562 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5563 &stripe_index);
5564 mirror_num = stripe_index + 1;
5565 }
5566 BUG_ON(stripe_index >= map->num_stripes);
5567
5568 num_alloc_stripes = num_stripes;
5569 if (dev_replace_is_ongoing) {
5570 if (rw & (REQ_WRITE | REQ_DISCARD))
5571 num_alloc_stripes <<= 1;
5572 if (rw & REQ_GET_READ_MIRRORS)
5573 num_alloc_stripes++;
5574 tgtdev_indexes = num_stripes;
5575 }
5576
5577 bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
5578 if (!bbio) {
5579 ret = -ENOMEM;
5580 goto out;
5581 }
5582 if (dev_replace_is_ongoing)
5583 bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
5584
5585 /* build raid_map */
5586 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
5587 need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
5588 mirror_num > 1)) {
5589 u64 tmp;
5590 unsigned rot;
5591
5592 bbio->raid_map = (u64 *)((void *)bbio->stripes +
5593 sizeof(struct btrfs_bio_stripe) *
5594 num_alloc_stripes +
5595 sizeof(int) * tgtdev_indexes);
5596
5597 /* Work out the disk rotation on this stripe-set */
5598 div_u64_rem(stripe_nr, num_stripes, &rot);
5599
5600 /* Fill in the logical address of each stripe */
5601 tmp = stripe_nr * nr_data_stripes(map);
5602 for (i = 0; i < nr_data_stripes(map); i++)
5603 bbio->raid_map[(i+rot) % num_stripes] =
5604 em->start + (tmp + i) * map->stripe_len;
5605
5606		bbio->raid_map[(i+rot) % num_stripes] = RAID5_P_STRIPE;
5607 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5608 bbio->raid_map[(i+rot+1) % num_stripes] =
5609 RAID6_Q_STRIPE;
5610 }
5611
5612 if (rw & REQ_DISCARD) {
5613 u32 factor = 0;
5614 u32 sub_stripes = 0;
5615 u64 stripes_per_dev = 0;
5616 u32 remaining_stripes = 0;
5617 u32 last_stripe = 0;
5618
5619 if (map->type &
5620 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
5621 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5622 sub_stripes = 1;
5623 else
5624 sub_stripes = map->sub_stripes;
5625
5626 factor = map->num_stripes / sub_stripes;
5627 stripes_per_dev = div_u64_rem(stripe_nr_end -
5628 stripe_nr_orig,
5629 factor,
5630 &remaining_stripes);
5631 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5632 last_stripe *= sub_stripes;
5633 }
5634
5635 for (i = 0; i < num_stripes; i++) {
5636 bbio->stripes[i].physical =
5637 map->stripes[stripe_index].physical +
5638 stripe_offset + stripe_nr * map->stripe_len;
5639 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5640
5641 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5642 BTRFS_BLOCK_GROUP_RAID10)) {
5643 bbio->stripes[i].length = stripes_per_dev *
5644 map->stripe_len;
5645
5646 if (i / sub_stripes < remaining_stripes)
5647 bbio->stripes[i].length +=
5648 map->stripe_len;
5649
5650 /*
5651 * Special for the first stripe and
5652 * the last stripe:
5653 *
5654 * |-------|...|-------|
5655 * |----------|
5656 * off end_off
5657 */
5658 if (i < sub_stripes)
5659 bbio->stripes[i].length -=
5660 stripe_offset;
5661
5662 if (stripe_index >= last_stripe &&
5663 stripe_index <= (last_stripe +
5664 sub_stripes - 1))
5665 bbio->stripes[i].length -=
5666 stripe_end_offset;
5667
5668 if (i == sub_stripes - 1)
5669 stripe_offset = 0;
5670 } else
5671 bbio->stripes[i].length = *length;
5672
5673 stripe_index++;
5674 if (stripe_index == map->num_stripes) {
5675 /* This could only happen for RAID0/10 */
5676 stripe_index = 0;
5677 stripe_nr++;
5678 }
5679 }
5680 } else {
5681 for (i = 0; i < num_stripes; i++) {
5682 bbio->stripes[i].physical =
5683 map->stripes[stripe_index].physical +
5684 stripe_offset +
5685 stripe_nr * map->stripe_len;
5686 bbio->stripes[i].dev =
5687 map->stripes[stripe_index].dev;
5688 stripe_index++;
5689 }
5690 }
5691
5692 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
5693 max_errors = btrfs_chunk_max_errors(map);
5694
5695 if (bbio->raid_map)
5696 sort_parity_stripes(bbio, num_stripes);
5697
5698 tgtdev_indexes = 0;
5699 if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
5700 dev_replace->tgtdev != NULL) {
5701 int index_where_to_add;
5702 u64 srcdev_devid = dev_replace->srcdev->devid;
5703
5704 /*
5705 * duplicate the write operations while the dev replace
5706 * procedure is running. Since the copying of the old disk
5707 * to the new disk takes place at run time while the
5708 * filesystem is mounted writable, the regular write
5709 * operations to the old disk have to be duplicated to go
5710 * to the new disk as well.
5711 * Note that device->missing is handled by the caller, and
5712 * that the write to the old disk is already set up in the
5713 * stripes array.
5714 */
5715 index_where_to_add = num_stripes;
5716 for (i = 0; i < num_stripes; i++) {
5717 if (bbio->stripes[i].dev->devid == srcdev_devid) {
5718 /* write to new disk, too */
5719 struct btrfs_bio_stripe *new =
5720 bbio->stripes + index_where_to_add;
5721 struct btrfs_bio_stripe *old =
5722 bbio->stripes + i;
5723
5724 new->physical = old->physical;
5725 new->length = old->length;
5726 new->dev = dev_replace->tgtdev;
5727 bbio->tgtdev_map[i] = index_where_to_add;
5728 index_where_to_add++;
5729 max_errors++;
5730 tgtdev_indexes++;
5731 }
5732 }
5733 num_stripes = index_where_to_add;
5734 } else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
5735 dev_replace->tgtdev != NULL) {
5736 u64 srcdev_devid = dev_replace->srcdev->devid;
5737 int index_srcdev = 0;
5738 int found = 0;
5739 u64 physical_of_found = 0;
5740
5741 /*
5742 * During the dev-replace procedure, the target drive can
5743 * also be used to read data in case it is needed to repair
5744 * a corrupt block elsewhere. This is possible if the
5745 * requested area is left of the left cursor. In this area,
5746 * the target drive is a full copy of the source drive.
5747 */
5748 for (i = 0; i < num_stripes; i++) {
5749 if (bbio->stripes[i].dev->devid == srcdev_devid) {
5750 /*
5751 * In case of DUP, in order to keep it
5752 * simple, only add the mirror with the
5753 * lowest physical address
5754 */
5755 if (found &&
5756 physical_of_found <=
5757 bbio->stripes[i].physical)
5758 continue;
5759 index_srcdev = i;
5760 found = 1;
5761 physical_of_found = bbio->stripes[i].physical;
5762 }
5763 }
5764 if (found) {
5765 if (physical_of_found + map->stripe_len <=
5766 dev_replace->cursor_left) {
5767 struct btrfs_bio_stripe *tgtdev_stripe =
5768 bbio->stripes + num_stripes;
5769
5770 tgtdev_stripe->physical = physical_of_found;
5771 tgtdev_stripe->length =
5772 bbio->stripes[index_srcdev].length;
5773 tgtdev_stripe->dev = dev_replace->tgtdev;
5774 bbio->tgtdev_map[index_srcdev] = num_stripes;
5775
5776 tgtdev_indexes++;
5777 num_stripes++;
5778 }
5779 }
5780 }
5781
5782 *bbio_ret = bbio;
5783 bbio->map_type = map->type;
5784 bbio->num_stripes = num_stripes;
5785 bbio->max_errors = max_errors;
5786 bbio->mirror_num = mirror_num;
5787 bbio->num_tgtdevs = tgtdev_indexes;
5788
5789 /*
5790 * this is the case that REQ_READ && dev_replace_is_ongoing &&
5791 * mirror_num == num_stripes + 1 && dev_replace target drive is
5792 * available as a mirror
5793 */
5794 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
5795 WARN_ON(num_stripes > 1);
5796 bbio->stripes[0].dev = dev_replace->tgtdev;
5797 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
5798 bbio->mirror_num = map->num_stripes + 1;
5799 }
5800out:
5801 if (dev_replace_is_ongoing) {
5802 btrfs_dev_replace_clear_lock_blocking(dev_replace);
5803 btrfs_dev_replace_unlock(dev_replace, 0);
5804 }
5805 free_extent_map(em);
5806 return ret;
5807}
5808
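
/*
 * Illustrative stand-alone sketch, not part of this file: the core
 * stripe arithmetic of __btrfs_map_block() for the simple RAID0 case -
 * which device a chunk-relative offset lands on and where on that
 * device. Names are made up; the map below is a toy two-disk layout.
 */
#if 0
#include <stdio.h>

struct model_raid0 {
	unsigned long long stripe_len;		/* e.g. 64KiB */
	int num_stripes;			/* one stripe per device */
	unsigned long long dev_offset[16];	/* physical start per dev */
};

static unsigned long long model_map_block(const struct model_raid0 *map,
					  unsigned long long offset,
					  int *stripe_index)
{
	unsigned long long stripe_nr = offset / map->stripe_len;
	unsigned long long stripe_offset =
		offset - stripe_nr * map->stripe_len;

	*stripe_index = stripe_nr % map->num_stripes;
	stripe_nr /= map->num_stripes;	/* full rows on that device */
	return map->dev_offset[*stripe_index] +
	       stripe_nr * map->stripe_len + stripe_offset;
}

int main(void)
{
	struct model_raid0 map = { 65536, 2, { 1048576, 2097152 } };
	int idx;
	unsigned long long phys = model_map_block(&map, 200000, &idx);

	printf("dev %d, physical %llu\n", idx, phys);
	return 0;
}
#endif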
5809int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5810 u64 logical, u64 *length,
5811 struct btrfs_bio **bbio_ret, int mirror_num)
5812{
5813 return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5814 mirror_num, 0);
5815}
5816
5817/* For Scrub/replace */
5818int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
5819 u64 logical, u64 *length,
5820 struct btrfs_bio **bbio_ret, int mirror_num,
5821 int need_raid_map)
5822{
5823 return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5824 mirror_num, need_raid_map);
5825}
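
/*
 * Illustrative only, not part of the original file: a minimal sketch of
 * how a caller could use btrfs_map_block() to resolve a logical address
 * into physical stripes. The function name and flag choice here are
 * assumptions for the example; btrfs_map_bio() below is a real caller.
 */
#if 0
static int example_map_logical(struct btrfs_fs_info *fs_info, u64 logical,
			       u64 len)
{
	struct btrfs_bio *bbio = NULL;
	u64 map_length = len;	/* may be shortened to the stripe boundary */
	int ret;

	ret = btrfs_map_block(fs_info, READ, logical, &map_length, &bbio, 0);
	if (ret)
		return ret;

	/* bbio->stripes[0..num_stripes - 1] hold (dev, physical) pairs */
	btrfs_put_bbio(bbio);
	return 0;
}
#endif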
5826
5827int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
5828 u64 chunk_start, u64 physical, u64 devid,
5829 u64 **logical, int *naddrs, int *stripe_len)
5830{
5831 struct extent_map_tree *em_tree = &map_tree->map_tree;
5832 struct extent_map *em;
5833 struct map_lookup *map;
5834 u64 *buf;
5835 u64 bytenr;
5836 u64 length;
5837 u64 stripe_nr;
5838 u64 rmap_len;
5839 int i, j, nr = 0;
5840
5841 read_lock(&em_tree->lock);
5842 em = lookup_extent_mapping(em_tree, chunk_start, 1);
5843 read_unlock(&em_tree->lock);
5844
5845 if (!em) {
5846 printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n",
5847 chunk_start);
5848 return -EIO;
5849 }
5850
5851 if (em->start != chunk_start) {
5852 printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n",
5853 em->start, chunk_start);
5854 free_extent_map(em);
5855 return -EIO;
5856 }
5857 map = em->map_lookup;
5858
5859 length = em->len;
5860 rmap_len = map->stripe_len;
5861
5862 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5863 length = div_u64(length, map->num_stripes / map->sub_stripes);
5864 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5865 length = div_u64(length, map->num_stripes);
5866 else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5867 length = div_u64(length, nr_data_stripes(map));
5868 rmap_len = map->stripe_len * nr_data_stripes(map);
5869 }
5870
5871 buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
5872 BUG_ON(!buf); /* -ENOMEM */
5873
5874 for (i = 0; i < map->num_stripes; i++) {
5875 if (devid && map->stripes[i].dev->devid != devid)
5876 continue;
5877 if (map->stripes[i].physical > physical ||
5878 map->stripes[i].physical + length <= physical)
5879 continue;
5880
5881 stripe_nr = physical - map->stripes[i].physical;
5882 stripe_nr = div_u64(stripe_nr, map->stripe_len);
5883
5884 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5885 stripe_nr = stripe_nr * map->num_stripes + i;
5886 stripe_nr = div_u64(stripe_nr, map->sub_stripes);
5887 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5888 stripe_nr = stripe_nr * map->num_stripes + i;
5889 } /* else if RAID[56], multiply by nr_data_stripes().
5890 * Alternatively, just use rmap_len below instead of
5891 * map->stripe_len */
5892
5893 bytenr = chunk_start + stripe_nr * rmap_len;
5894 WARN_ON(nr >= map->num_stripes);
5895 for (j = 0; j < nr; j++) {
5896 if (buf[j] == bytenr)
5897 break;
5898 }
5899 if (j == nr) {
5900 WARN_ON(nr >= map->num_stripes);
5901 buf[nr++] = bytenr;
5902 }
5903 }
5904
5905 *logical = buf;
5906 *naddrs = nr;
5907 *stripe_len = rmap_len;
5908
5909 free_extent_map(em);
5910 return 0;
5911}
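
/*
 * Worked example for the reverse mapping above (numbers are illustrative):
 * on a RAID0 chunk with num_stripes = 2 and stripe_len = 64K, a physical
 * address 192K past the start of stripe i = 1 gives
 * stripe_nr = 192K / 64K = 3, then stripe_nr = 3 * 2 + 1 = 7, so the
 * logical address reported in *logical is
 * chunk_start + 7 * 64K = chunk_start + 448K.
 */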
5912
5913static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
5914{
5915 bio->bi_private = bbio->private;
5916 bio->bi_end_io = bbio->end_io;
5917 bio_endio(bio);
5918
5919 btrfs_put_bbio(bbio);
5920}
5921
5922static void btrfs_end_bio(struct bio *bio)
5923{
5924 struct btrfs_bio *bbio = bio->bi_private;
5925 int is_orig_bio = 0;
5926
5927 if (bio->bi_error) {
5928 atomic_inc(&bbio->error);
5929 if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
5930 unsigned int stripe_index =
5931 btrfs_io_bio(bio)->stripe_index;
5932 struct btrfs_device *dev;
5933
5934 BUG_ON(stripe_index >= bbio->num_stripes);
5935 dev = bbio->stripes[stripe_index].dev;
5936 if (dev->bdev) {
5937 if (bio->bi_rw & WRITE)
5938 btrfs_dev_stat_inc(dev,
5939 BTRFS_DEV_STAT_WRITE_ERRS);
5940 else
5941 btrfs_dev_stat_inc(dev,
5942 BTRFS_DEV_STAT_READ_ERRS);
5943 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
5944 btrfs_dev_stat_inc(dev,
5945 BTRFS_DEV_STAT_FLUSH_ERRS);
5946 btrfs_dev_stat_print_on_error(dev);
5947 }
5948 }
5949 }
5950
5951 if (bio == bbio->orig_bio)
5952 is_orig_bio = 1;
5953
5954 btrfs_bio_counter_dec(bbio->fs_info);
5955
5956 if (atomic_dec_and_test(&bbio->stripes_pending)) {
5957 if (!is_orig_bio) {
5958 bio_put(bio);
5959 bio = bbio->orig_bio;
5960 }
5961
5962 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5963 /* only send an error to the higher layers if it is
5964 * beyond the tolerance of the btrfs bio
5965 */
5966 if (atomic_read(&bbio->error) > bbio->max_errors) {
5967 bio->bi_error = -EIO;
5968 } else {
5969 /*
5970			 * this bio is actually up to date; we didn't
5971			 * go over the max number of errors
5972 */
5973 bio->bi_error = 0;
5974 }
5975
5976 btrfs_end_bbio(bbio, bio);
5977 } else if (!is_orig_bio) {
5978 bio_put(bio);
5979 }
5980}
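
/*
 * Tolerance example (illustrative): a RAID1 write completes with
 * num_stripes = 2 and max_errors = 1, so a single failed copy still ends
 * the original bio with bi_error = 0; only a second failure exceeds
 * max_errors and propagates -EIO to the upper layers.
 */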
5981
5982/*
5983 * see run_scheduled_bios for a description of why bios are collected for
5984 * async submit.
5985 *
5986 * This will add one bio to the pending list for a device and make sure
5987 * the work struct is scheduled.
5988 */
5989static noinline void btrfs_schedule_bio(struct btrfs_root *root,
5990 struct btrfs_device *device,
5991 int rw, struct bio *bio)
5992{
5993 int should_queue = 1;
5994 struct btrfs_pending_bios *pending_bios;
5995
5996 if (device->missing || !device->bdev) {
5997 bio_io_error(bio);
5998 return;
5999 }
6000
6001 /* don't bother with additional async steps for reads, right now */
6002 if (!(rw & REQ_WRITE)) {
6003 bio_get(bio);
6004 btrfsic_submit_bio(rw, bio);
6005 bio_put(bio);
6006 return;
6007 }
6008
6009 /*
6010 * nr_async_bios allows us to reliably return congestion to the
6011 * higher layers. Otherwise, the async bio makes it appear we have
6012 * made progress against dirty pages when we've really just put it
6013 * on a queue for later
6014 */
6015 atomic_inc(&root->fs_info->nr_async_bios);
6016 WARN_ON(bio->bi_next);
6017 bio->bi_next = NULL;
6018 bio->bi_rw |= rw;
6019
6020 spin_lock(&device->io_lock);
6021 if (bio->bi_rw & REQ_SYNC)
6022 pending_bios = &device->pending_sync_bios;
6023 else
6024 pending_bios = &device->pending_bios;
6025
6026 if (pending_bios->tail)
6027 pending_bios->tail->bi_next = bio;
6028
6029 pending_bios->tail = bio;
6030 if (!pending_bios->head)
6031 pending_bios->head = bio;
6032 if (device->running_pending)
6033 should_queue = 0;
6034
6035 spin_unlock(&device->io_lock);
6036
6037 if (should_queue)
6038 btrfs_queue_work(root->fs_info->submit_workers,
6039 &device->work);
6040}
6041
6042static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
6043 struct bio *bio, u64 physical, int dev_nr,
6044 int rw, int async)
6045{
6046 struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
6047
6048 bio->bi_private = bbio;
6049 btrfs_io_bio(bio)->stripe_index = dev_nr;
6050 bio->bi_end_io = btrfs_end_bio;
6051 bio->bi_iter.bi_sector = physical >> 9;
6052#ifdef DEBUG
6053 {
6054 struct rcu_string *name;
6055
6056 rcu_read_lock();
6057 name = rcu_dereference(dev->name);
6058 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
6059 "(%s id %llu), size=%u\n", rw,
6060 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
6061 name->str, dev->devid, bio->bi_iter.bi_size);
6062 rcu_read_unlock();
6063 }
6064#endif
6065 bio->bi_bdev = dev->bdev;
6066
6067 btrfs_bio_counter_inc_noblocked(root->fs_info);
6068
6069 if (async)
6070 btrfs_schedule_bio(root, dev, rw, bio);
6071 else
6072 btrfsic_submit_bio(rw, bio);
6073}
6074
6075static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6076{
6077 atomic_inc(&bbio->error);
6078 if (atomic_dec_and_test(&bbio->stripes_pending)) {
6079		/* Should be the original bio. */
6080 WARN_ON(bio != bbio->orig_bio);
6081
6082 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6083 bio->bi_iter.bi_sector = logical >> 9;
6084 bio->bi_error = -EIO;
6085 btrfs_end_bbio(bbio, bio);
6086 }
6087}
6088
6089int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
6090 int mirror_num, int async_submit)
6091{
6092 struct btrfs_device *dev;
6093 struct bio *first_bio = bio;
6094 u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6095 u64 length = 0;
6096 u64 map_length;
6097 int ret;
6098 int dev_nr;
6099 int total_devs;
6100 struct btrfs_bio *bbio = NULL;
6101
6102 length = bio->bi_iter.bi_size;
6103 map_length = length;
6104
6105 btrfs_bio_counter_inc_blocked(root->fs_info);
6106 ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
6107 mirror_num, 1);
6108 if (ret) {
6109 btrfs_bio_counter_dec(root->fs_info);
6110 return ret;
6111 }
6112
6113 total_devs = bbio->num_stripes;
6114 bbio->orig_bio = first_bio;
6115 bbio->private = first_bio->bi_private;
6116 bbio->end_io = first_bio->bi_end_io;
6117 bbio->fs_info = root->fs_info;
6118 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6119
6120 if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6121 ((rw & WRITE) || (mirror_num > 1))) {
6122		/* In this case, map_length has been set to the length of
6123		 * a single stripe, not the whole write */
6124 if (rw & WRITE) {
6125 ret = raid56_parity_write(root, bio, bbio, map_length);
6126 } else {
6127 ret = raid56_parity_recover(root, bio, bbio, map_length,
6128 mirror_num, 1);
6129 }
6130
6131 btrfs_bio_counter_dec(root->fs_info);
6132 return ret;
6133 }
6134
6135 if (map_length < length) {
6136 btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
6137 logical, length, map_length);
6138 BUG();
6139 }
6140
6141 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6142 dev = bbio->stripes[dev_nr].dev;
6143 if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
6144 bbio_error(bbio, first_bio, logical);
6145 continue;
6146 }
6147
6148 if (dev_nr < total_devs - 1) {
6149 bio = btrfs_bio_clone(first_bio, GFP_NOFS);
6150 BUG_ON(!bio); /* -ENOMEM */
6151 } else
6152 bio = first_bio;
6153
6154 submit_stripe_bio(root, bbio, bio,
6155 bbio->stripes[dev_nr].physical, dev_nr, rw,
6156 async_submit);
6157 }
6158 btrfs_bio_counter_dec(root->fs_info);
6159 return 0;
6160}
6161
6162struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
6163 u8 *uuid, u8 *fsid)
6164{
6165 struct btrfs_device *device;
6166 struct btrfs_fs_devices *cur_devices;
6167
6168 cur_devices = fs_info->fs_devices;
6169 while (cur_devices) {
6170 if (!fsid ||
6171 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
6172 device = __find_device(&cur_devices->devices,
6173 devid, uuid);
6174 if (device)
6175 return device;
6176 }
6177 cur_devices = cur_devices->seed;
6178 }
6179 return NULL;
6180}
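
/*
 * Illustrative only: both @uuid and @fsid act as wildcards when NULL, so a
 * lookup by devid alone walks every fs_devices in the seed chain:
 *
 *	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
 *
 * which is exactly how btrfs_get_dev_stats() below resolves a
 * user-supplied devid.
 */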
6181
6182static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
6183 struct btrfs_fs_devices *fs_devices,
6184 u64 devid, u8 *dev_uuid)
6185{
6186 struct btrfs_device *device;
6187
6188 device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6189 if (IS_ERR(device))
6190 return NULL;
6191
6192 list_add(&device->dev_list, &fs_devices->devices);
6193 device->fs_devices = fs_devices;
6194 fs_devices->num_devices++;
6195
6196 device->missing = 1;
6197 fs_devices->missing_devices++;
6198
6199 return device;
6200}
6201
6202/**
6203 * btrfs_alloc_device - allocate struct btrfs_device
6204 * @fs_info: used only for generating a new devid, can be NULL if
6205 * devid is provided (i.e. @devid != NULL).
6206 * @devid: a pointer to devid for this device. If NULL a new devid
6207 * is generated.
6208 * @uuid: a pointer to UUID for this device. If NULL a new UUID
6209 * is generated.
6210 *
6211 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6212 * on error. Returned struct is not linked onto any lists and can be
6213 * destroyed with kfree() right away.
6214 */
6215struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6216 const u64 *devid,
6217 const u8 *uuid)
6218{
6219 struct btrfs_device *dev;
6220 u64 tmp;
6221
6222 if (WARN_ON(!devid && !fs_info))
6223 return ERR_PTR(-EINVAL);
6224
6225 dev = __alloc_device();
6226 if (IS_ERR(dev))
6227 return dev;
6228
6229 if (devid)
6230 tmp = *devid;
6231 else {
6232 int ret;
6233
6234 ret = find_next_devid(fs_info, &tmp);
6235 if (ret) {
6236 kfree(dev);
6237 return ERR_PTR(ret);
6238 }
6239 }
6240 dev->devid = tmp;
6241
6242 if (uuid)
6243 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6244 else
6245 generate_random_uuid(dev->uuid);
6246
6247 btrfs_init_work(&dev->work, btrfs_submit_helper,
6248 pending_bios_fn, NULL, NULL);
6249
6250 return dev;
6251}
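
/*
 * Usage sketch (illustrative, not from the original file): the two call
 * patterns permitted by the kernel-doc above, as they would appear inside
 * a caller.
 */
#if 0
	/* known devid and uuid, e.g. when recreating a missing device */
	dev = btrfs_alloc_device(NULL, &devid, dev_uuid);

	/* brand new device: generate both a devid and a uuid */
	dev = btrfs_alloc_device(fs_info, NULL, NULL);

	if (IS_ERR(dev))
		return PTR_ERR(dev);
#endif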
6252
6253static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
6254 struct extent_buffer *leaf,
6255 struct btrfs_chunk *chunk)
6256{
6257 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
6258 struct map_lookup *map;
6259 struct extent_map *em;
6260 u64 logical;
6261 u64 length;
6262 u64 stripe_len;
6263 u64 devid;
6264 u8 uuid[BTRFS_UUID_SIZE];
6265 int num_stripes;
6266 int ret;
6267 int i;
6268
6269 logical = key->offset;
6270 length = btrfs_chunk_length(leaf, chunk);
6271 stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6272 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6273 /* Validation check */
6274 if (!num_stripes) {
6275 btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
6276 num_stripes);
6277 return -EIO;
6278 }
6279 if (!IS_ALIGNED(logical, root->sectorsize)) {
6280 btrfs_err(root->fs_info,
6281 "invalid chunk logical %llu", logical);
6282 return -EIO;
6283 }
6284 if (!length || !IS_ALIGNED(length, root->sectorsize)) {
6285 btrfs_err(root->fs_info,
6286 "invalid chunk length %llu", length);
6287 return -EIO;
6288 }
6289 if (!is_power_of_2(stripe_len)) {
6290 btrfs_err(root->fs_info, "invalid chunk stripe length: %llu",
6291 stripe_len);
6292 return -EIO;
6293 }
6294 if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6295 btrfs_chunk_type(leaf, chunk)) {
6296 btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
6297 ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
6298 BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6299 btrfs_chunk_type(leaf, chunk));
6300 return -EIO;
6301 }
6302
6303 read_lock(&map_tree->map_tree.lock);
6304 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
6305 read_unlock(&map_tree->map_tree.lock);
6306
6307 /* already mapped? */
6308 if (em && em->start <= logical && em->start + em->len > logical) {
6309 free_extent_map(em);
6310 return 0;
6311 } else if (em) {
6312 free_extent_map(em);
6313 }
6314
6315 em = alloc_extent_map();
6316 if (!em)
6317 return -ENOMEM;
6318 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6319 if (!map) {
6320 free_extent_map(em);
6321 return -ENOMEM;
6322 }
6323
6324 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6325 em->map_lookup = map;
6326 em->start = logical;
6327 em->len = length;
6328 em->orig_start = 0;
6329 em->block_start = 0;
6330 em->block_len = em->len;
6331
6332 map->num_stripes = num_stripes;
6333 map->io_width = btrfs_chunk_io_width(leaf, chunk);
6334 map->io_align = btrfs_chunk_io_align(leaf, chunk);
6335 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
6336 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6337 map->type = btrfs_chunk_type(leaf, chunk);
6338 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6339 for (i = 0; i < num_stripes; i++) {
6340 map->stripes[i].physical =
6341 btrfs_stripe_offset_nr(leaf, chunk, i);
6342 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6343 read_extent_buffer(leaf, uuid, (unsigned long)
6344 btrfs_stripe_dev_uuid_nr(chunk, i),
6345 BTRFS_UUID_SIZE);
6346 map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
6347 uuid, NULL);
6348 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
6349 free_extent_map(em);
6350 return -EIO;
6351 }
6352 if (!map->stripes[i].dev) {
6353 map->stripes[i].dev =
6354 add_missing_dev(root, root->fs_info->fs_devices,
6355 devid, uuid);
6356 if (!map->stripes[i].dev) {
6357 free_extent_map(em);
6358 return -EIO;
6359 }
6360 btrfs_warn(root->fs_info, "devid %llu uuid %pU is missing",
6361 devid, uuid);
6362 }
6363 map->stripes[i].dev->in_fs_metadata = 1;
6364 }
6365
6366 write_lock(&map_tree->map_tree.lock);
6367 ret = add_extent_mapping(&map_tree->map_tree, em, 0);
6368 write_unlock(&map_tree->map_tree.lock);
6369 BUG_ON(ret); /* Tree corruption */
6370 free_extent_map(em);
6371
6372 return 0;
6373}
6374
6375static void fill_device_from_item(struct extent_buffer *leaf,
6376 struct btrfs_dev_item *dev_item,
6377 struct btrfs_device *device)
6378{
6379 unsigned long ptr;
6380
6381 device->devid = btrfs_device_id(leaf, dev_item);
6382 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6383 device->total_bytes = device->disk_total_bytes;
6384 device->commit_total_bytes = device->disk_total_bytes;
6385 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6386 device->commit_bytes_used = device->bytes_used;
6387 device->type = btrfs_device_type(leaf, dev_item);
6388 device->io_align = btrfs_device_io_align(leaf, dev_item);
6389 device->io_width = btrfs_device_io_width(leaf, dev_item);
6390 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6391 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6392 device->is_tgtdev_for_dev_replace = 0;
6393
6394 ptr = btrfs_device_uuid(dev_item);
6395 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6396}
6397
6398static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
6399 u8 *fsid)
6400{
6401 struct btrfs_fs_devices *fs_devices;
6402 int ret;
6403
6404 BUG_ON(!mutex_is_locked(&uuid_mutex));
6405
6406 fs_devices = root->fs_info->fs_devices->seed;
6407 while (fs_devices) {
6408 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE))
6409 return fs_devices;
6410
6411 fs_devices = fs_devices->seed;
6412 }
6413
6414 fs_devices = find_fsid(fsid);
6415 if (!fs_devices) {
6416 if (!btrfs_test_opt(root, DEGRADED))
6417 return ERR_PTR(-ENOENT);
6418
6419 fs_devices = alloc_fs_devices(fsid);
6420 if (IS_ERR(fs_devices))
6421 return fs_devices;
6422
6423 fs_devices->seeding = 1;
6424 fs_devices->opened = 1;
6425 return fs_devices;
6426 }
6427
6428 fs_devices = clone_fs_devices(fs_devices);
6429 if (IS_ERR(fs_devices))
6430 return fs_devices;
6431
6432 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
6433 root->fs_info->bdev_holder);
6434 if (ret) {
6435 free_fs_devices(fs_devices);
6436 fs_devices = ERR_PTR(ret);
6437 goto out;
6438 }
6439
6440 if (!fs_devices->seeding) {
6441 __btrfs_close_devices(fs_devices);
6442 free_fs_devices(fs_devices);
6443 fs_devices = ERR_PTR(-EINVAL);
6444 goto out;
6445 }
6446
6447 fs_devices->seed = root->fs_info->fs_devices->seed;
6448 root->fs_info->fs_devices->seed = fs_devices;
6449out:
6450 return fs_devices;
6451}
6452
6453static int read_one_dev(struct btrfs_root *root,
6454 struct extent_buffer *leaf,
6455 struct btrfs_dev_item *dev_item)
6456{
6457 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6458 struct btrfs_device *device;
6459 u64 devid;
6460 int ret;
6461 u8 fs_uuid[BTRFS_UUID_SIZE];
6462 u8 dev_uuid[BTRFS_UUID_SIZE];
6463
6464 devid = btrfs_device_id(leaf, dev_item);
6465 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6466 BTRFS_UUID_SIZE);
6467 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6468 BTRFS_UUID_SIZE);
6469
6470 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
6471 fs_devices = open_seed_devices(root, fs_uuid);
6472 if (IS_ERR(fs_devices))
6473 return PTR_ERR(fs_devices);
6474 }
6475
6476 device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
6477 if (!device) {
6478 if (!btrfs_test_opt(root, DEGRADED))
6479 return -EIO;
6480
6481 device = add_missing_dev(root, fs_devices, devid, dev_uuid);
6482 if (!device)
6483 return -ENOMEM;
6484 btrfs_warn(root->fs_info, "devid %llu uuid %pU missing",
6485 devid, dev_uuid);
6486 } else {
6487 if (!device->bdev && !btrfs_test_opt(root, DEGRADED))
6488 return -EIO;
6489
6490		if (!device->bdev && !device->missing) {
6491			/*
6492			 * this happens when a device that was properly set up
6493			 * in the device info lists suddenly goes bad.
6494			 * device->bdev is NULL, and so we have to set
6495			 * device->missing to 1 here
6496 */
6497 device->fs_devices->missing_devices++;
6498 device->missing = 1;
6499 }
6500
6501 /* Move the device to its own fs_devices */
6502 if (device->fs_devices != fs_devices) {
6503 ASSERT(device->missing);
6504
6505 list_move(&device->dev_list, &fs_devices->devices);
6506 device->fs_devices->num_devices--;
6507 fs_devices->num_devices++;
6508
6509 device->fs_devices->missing_devices--;
6510 fs_devices->missing_devices++;
6511
6512 device->fs_devices = fs_devices;
6513 }
6514 }
6515
6516 if (device->fs_devices != root->fs_info->fs_devices) {
6517 BUG_ON(device->writeable);
6518 if (device->generation !=
6519 btrfs_device_generation(leaf, dev_item))
6520 return -EINVAL;
6521 }
6522
6523 fill_device_from_item(leaf, dev_item, device);
6524 device->in_fs_metadata = 1;
6525 if (device->writeable && !device->is_tgtdev_for_dev_replace) {
6526 device->fs_devices->total_rw_bytes += device->total_bytes;
6527 spin_lock(&root->fs_info->free_chunk_lock);
6528 root->fs_info->free_chunk_space += device->total_bytes -
6529 device->bytes_used;
6530 spin_unlock(&root->fs_info->free_chunk_lock);
6531 }
6532 ret = 0;
6533 return ret;
6534}
6535
6536int btrfs_read_sys_array(struct btrfs_root *root)
6537{
6538 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
6539 struct extent_buffer *sb;
6540 struct btrfs_disk_key *disk_key;
6541 struct btrfs_chunk *chunk;
6542 u8 *array_ptr;
6543 unsigned long sb_array_offset;
6544 int ret = 0;
6545 u32 num_stripes;
6546 u32 array_size;
6547 u32 len = 0;
6548 u32 cur_offset;
6549 struct btrfs_key key;
6550
6551 ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
6552 /*
6553	 * This will create an extent buffer of nodesize; the superblock size is
6554	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
6555	 * overallocate, but we can keep it as-is: only the first page is used.
6556 */
6557 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
6558 if (!sb)
6559 return -ENOMEM;
6560 set_extent_buffer_uptodate(sb);
6561 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6562 /*
6563	 * The sb extent buffer is artificial and only used to read the system array.
6564	 * The set_extent_buffer_uptodate() call does not properly mark all its
6565 * pages up-to-date when the page is larger: extent does not cover the
6566 * whole page and consequently check_page_uptodate does not find all
6567 * the page's extents up-to-date (the hole beyond sb),
6568 * write_extent_buffer then triggers a WARN_ON.
6569 *
6570 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
6571 * but sb spans only this function. Add an explicit SetPageUptodate call
6572 * to silence the warning eg. on PowerPC 64.
6573 */
6574 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
6575 SetPageUptodate(sb->pages[0]);
6576
6577 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
6578 array_size = btrfs_super_sys_array_size(super_copy);
6579
6580 array_ptr = super_copy->sys_chunk_array;
6581 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
6582 cur_offset = 0;
6583
6584 while (cur_offset < array_size) {
6585 disk_key = (struct btrfs_disk_key *)array_ptr;
6586 len = sizeof(*disk_key);
6587 if (cur_offset + len > array_size)
6588 goto out_short_read;
6589
6590 btrfs_disk_key_to_cpu(&key, disk_key);
6591
6592 array_ptr += len;
6593 sb_array_offset += len;
6594 cur_offset += len;
6595
6596 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
6597 chunk = (struct btrfs_chunk *)sb_array_offset;
6598 /*
6599 * At least one btrfs_chunk with one stripe must be
6600			 * present; the exact stripe count check comes afterwards
6601 */
6602 len = btrfs_chunk_item_size(1);
6603 if (cur_offset + len > array_size)
6604 goto out_short_read;
6605
6606 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
6607 if (!num_stripes) {
6608 printk(KERN_ERR
6609 "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
6610 num_stripes, cur_offset);
6611 ret = -EIO;
6612 break;
6613 }
6614
6615 len = btrfs_chunk_item_size(num_stripes);
6616 if (cur_offset + len > array_size)
6617 goto out_short_read;
6618
6619 ret = read_one_chunk(root, &key, sb, chunk);
6620 if (ret)
6621 break;
6622 } else {
6623 printk(KERN_ERR
6624 "BTRFS: unexpected item type %u in sys_array at offset %u\n",
6625 (u32)key.type, cur_offset);
6626 ret = -EIO;
6627 break;
6628 }
6629 array_ptr += len;
6630 sb_array_offset += len;
6631 cur_offset += len;
6632 }
6633 free_extent_buffer(sb);
6634 return ret;
6635
6636out_short_read:
6637 printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
6638 len, cur_offset);
6639 free_extent_buffer(sb);
6640 return -EIO;
6641}
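
/*
 * Layout note for the parser above (for reference): each sys_chunk_array
 * element is a btrfs_disk_key directly followed by a btrfs_chunk whose
 * size depends on its stripe count. Because struct btrfs_chunk embeds the
 * first stripe, btrfs_chunk_item_size(n) is
 * sizeof(struct btrfs_chunk) + (n - 1) * sizeof(struct btrfs_stripe),
 * which is why the loop first bounds-checks a one-stripe item, reads
 * num_stripes from it, and only then consumes the full item.
 */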
6642
6643int btrfs_read_chunk_tree(struct btrfs_root *root)
6644{
6645 struct btrfs_path *path;
6646 struct extent_buffer *leaf;
6647 struct btrfs_key key;
6648 struct btrfs_key found_key;
6649 int ret;
6650 int slot;
6651
6652 root = root->fs_info->chunk_root;
6653
6654 path = btrfs_alloc_path();
6655 if (!path)
6656 return -ENOMEM;
6657
6658 mutex_lock(&uuid_mutex);
6659 lock_chunks(root);
6660
6661 /*
6662 * Read all device items, and then all the chunk items. All
6663 * device items are found before any chunk item (their object id
6664 * is smaller than the lowest possible object id for a chunk
6665 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
6666 */
6667 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
6668 key.offset = 0;
6669 key.type = 0;
6670 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6671 if (ret < 0)
6672 goto error;
6673 while (1) {
6674 leaf = path->nodes[0];
6675 slot = path->slots[0];
6676 if (slot >= btrfs_header_nritems(leaf)) {
6677 ret = btrfs_next_leaf(root, path);
6678 if (ret == 0)
6679 continue;
6680 if (ret < 0)
6681 goto error;
6682 break;
6683 }
6684 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6685 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
6686 struct btrfs_dev_item *dev_item;
6687 dev_item = btrfs_item_ptr(leaf, slot,
6688 struct btrfs_dev_item);
6689 ret = read_one_dev(root, leaf, dev_item);
6690 if (ret)
6691 goto error;
6692 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
6693 struct btrfs_chunk *chunk;
6694 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
6695 ret = read_one_chunk(root, &found_key, leaf, chunk);
6696 if (ret)
6697 goto error;
6698 }
6699 path->slots[0]++;
6700 }
6701 ret = 0;
6702error:
6703 unlock_chunks(root);
6704 mutex_unlock(&uuid_mutex);
6705
6706 btrfs_free_path(path);
6707 return ret;
6708}
6709
6710void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
6711{
6712 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6713 struct btrfs_device *device;
6714
6715 while (fs_devices) {
6716 mutex_lock(&fs_devices->device_list_mutex);
6717 list_for_each_entry(device, &fs_devices->devices, dev_list)
6718 device->dev_root = fs_info->dev_root;
6719 mutex_unlock(&fs_devices->device_list_mutex);
6720
6721 fs_devices = fs_devices->seed;
6722 }
6723}
6724
6725static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
6726{
6727 int i;
6728
6729 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6730 btrfs_dev_stat_reset(dev, i);
6731}
6732
6733int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
6734{
6735 struct btrfs_key key;
6736 struct btrfs_key found_key;
6737 struct btrfs_root *dev_root = fs_info->dev_root;
6738 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6739 struct extent_buffer *eb;
6740 int slot;
6741 int ret = 0;
6742 struct btrfs_device *device;
6743 struct btrfs_path *path = NULL;
6744 int i;
6745
6746 path = btrfs_alloc_path();
6747 if (!path) {
6748 ret = -ENOMEM;
6749 goto out;
6750 }
6751
6752 mutex_lock(&fs_devices->device_list_mutex);
6753 list_for_each_entry(device, &fs_devices->devices, dev_list) {
6754 int item_size;
6755 struct btrfs_dev_stats_item *ptr;
6756
6757 key.objectid = BTRFS_DEV_STATS_OBJECTID;
6758 key.type = BTRFS_PERSISTENT_ITEM_KEY;
6759 key.offset = device->devid;
6760 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
6761 if (ret) {
6762 __btrfs_reset_dev_stats(device);
6763 device->dev_stats_valid = 1;
6764 btrfs_release_path(path);
6765 continue;
6766 }
6767 slot = path->slots[0];
6768 eb = path->nodes[0];
6769 btrfs_item_key_to_cpu(eb, &found_key, slot);
6770 item_size = btrfs_item_size_nr(eb, slot);
6771
6772 ptr = btrfs_item_ptr(eb, slot,
6773 struct btrfs_dev_stats_item);
6774
6775 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6776 if (item_size >= (1 + i) * sizeof(__le64))
6777 btrfs_dev_stat_set(device, i,
6778 btrfs_dev_stats_value(eb, ptr, i));
6779 else
6780 btrfs_dev_stat_reset(device, i);
6781 }
6782
6783 device->dev_stats_valid = 1;
6784 btrfs_dev_stat_print_on_load(device);
6785 btrfs_release_path(path);
6786 }
6787 mutex_unlock(&fs_devices->device_list_mutex);
6788
6789out:
6790 btrfs_free_path(path);
6791 return ret < 0 ? ret : 0;
6792}
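
/*
 * Worked example for the item_size check above (illustrative): one __le64
 * is stored per counter, so with BTRFS_DEV_STAT_VALUES_MAX counters (five
 * at the time of writing) a full item is 40 bytes. An older 24-byte item
 * satisfies item_size >= (1 + i) * 8 only for i = 0..2, so counters 3 and
 * 4 are reset, letting new kernels read items written before those
 * counters existed.
 */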
6793
6794static int update_dev_stat_item(struct btrfs_trans_handle *trans,
6795 struct btrfs_root *dev_root,
6796 struct btrfs_device *device)
6797{
6798 struct btrfs_path *path;
6799 struct btrfs_key key;
6800 struct extent_buffer *eb;
6801 struct btrfs_dev_stats_item *ptr;
6802 int ret;
6803 int i;
6804
6805 key.objectid = BTRFS_DEV_STATS_OBJECTID;
6806 key.type = BTRFS_PERSISTENT_ITEM_KEY;
6807 key.offset = device->devid;
6808
6809	path = btrfs_alloc_path();
6810	if (!path)
		return -ENOMEM;
6811 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
6812 if (ret < 0) {
6813 btrfs_warn_in_rcu(dev_root->fs_info,
6814 "error %d while searching for dev_stats item for device %s",
6815 ret, rcu_str_deref(device->name));
6816 goto out;
6817 }
6818
6819 if (ret == 0 &&
6820 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
6821 /* need to delete old one and insert a new one */
6822 ret = btrfs_del_item(trans, dev_root, path);
6823 if (ret != 0) {
6824 btrfs_warn_in_rcu(dev_root->fs_info,
6825 "delete too small dev_stats item for device %s failed %d",
6826 rcu_str_deref(device->name), ret);
6827 goto out;
6828 }
6829 ret = 1;
6830 }
6831
6832 if (ret == 1) {
6833 /* need to insert a new item */
6834 btrfs_release_path(path);
6835 ret = btrfs_insert_empty_item(trans, dev_root, path,
6836 &key, sizeof(*ptr));
6837 if (ret < 0) {
6838 btrfs_warn_in_rcu(dev_root->fs_info,
6839 "insert dev_stats item for device %s failed %d",
6840 rcu_str_deref(device->name), ret);
6841 goto out;
6842 }
6843 }
6844
6845 eb = path->nodes[0];
6846 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
6847 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6848 btrfs_set_dev_stats_value(eb, ptr, i,
6849 btrfs_dev_stat_read(device, i));
6850 btrfs_mark_buffer_dirty(eb);
6851
6852out:
6853 btrfs_free_path(path);
6854 return ret;
6855}
6856
6857/*
6858 * called from commit_transaction. Writes all changed device stats to disk.
6859 */
6860int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
6861 struct btrfs_fs_info *fs_info)
6862{
6863 struct btrfs_root *dev_root = fs_info->dev_root;
6864 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6865 struct btrfs_device *device;
6866 int stats_cnt;
6867 int ret = 0;
6868
6869 mutex_lock(&fs_devices->device_list_mutex);
6870 list_for_each_entry(device, &fs_devices->devices, dev_list) {
6871 if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
6872 continue;
6873
6874 stats_cnt = atomic_read(&device->dev_stats_ccnt);
6875 ret = update_dev_stat_item(trans, dev_root, device);
6876 if (!ret)
6877 atomic_sub(stats_cnt, &device->dev_stats_ccnt);
6878 }
6879 mutex_unlock(&fs_devices->device_list_mutex);
6880
6881 return ret;
6882}
6883
6884void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
6885{
6886 btrfs_dev_stat_inc(dev, index);
6887 btrfs_dev_stat_print_on_error(dev);
6888}
6889
6890static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
6891{
6892 if (!dev->dev_stats_valid)
6893 return;
6894 btrfs_err_rl_in_rcu(dev->dev_root->fs_info,
6895 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
6896 rcu_str_deref(dev->name),
6897 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6898 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6899 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6900 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6901 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6902}
6903
6904static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
6905{
6906 int i;
6907
6908 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6909 if (btrfs_dev_stat_read(dev, i) != 0)
6910 break;
6911 if (i == BTRFS_DEV_STAT_VALUES_MAX)
6912 return; /* all values == 0, suppress message */
6913
6914 btrfs_info_in_rcu(dev->dev_root->fs_info,
6915 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
6916 rcu_str_deref(dev->name),
6917 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6918 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6919 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6920 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6921 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6922}
6923
6924int btrfs_get_dev_stats(struct btrfs_root *root,
6925 struct btrfs_ioctl_get_dev_stats *stats)
6926{
6927 struct btrfs_device *dev;
6928 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6929 int i;
6930
6931 mutex_lock(&fs_devices->device_list_mutex);
6932 dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
6933 mutex_unlock(&fs_devices->device_list_mutex);
6934
6935 if (!dev) {
6936 btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
6937 return -ENODEV;
6938 } else if (!dev->dev_stats_valid) {
6939 btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
6940 return -ENODEV;
6941 } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
6942 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6943 if (stats->nr_items > i)
6944 stats->values[i] =
6945 btrfs_dev_stat_read_and_reset(dev, i);
6946 else
6947 btrfs_dev_stat_reset(dev, i);
6948 }
6949 } else {
6950 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6951 if (stats->nr_items > i)
6952 stats->values[i] = btrfs_dev_stat_read(dev, i);
6953 }
6954 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
6955 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
6956 return 0;
6957}
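
/*
 * Illustrative only: a minimal user-space sketch driving this handler via
 * the BTRFS_IOC_GET_DEV_STATS ioctl. The function name and error handling
 * are assumptions for the example.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

static int read_dev_stats(const char *mnt, __u64 devid)
{
	struct btrfs_ioctl_get_dev_stats args = {
		.devid = devid,
		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
	};
	int fd = open(mnt, O_RDONLY);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &args);
	close(fd);
	/* on success args.values[0..nr_items - 1] hold the counters */
	return ret;
}
#endif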
6958
6959void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path)
6960{
6961 struct buffer_head *bh;
6962 struct btrfs_super_block *disk_super;
6963 int copy_num;
6964
6965 if (!bdev)
6966 return;
6967
6968 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
6969 copy_num++) {
6970
6971 if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
6972 continue;
6973
6974 disk_super = (struct btrfs_super_block *)bh->b_data;
6975
6976 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
6977 set_buffer_dirty(bh);
6978 sync_dirty_buffer(bh);
6979 brelse(bh);
6980 }
6981
6982 /* Notify udev that device has changed */
6983 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
6984
6985 /* Update ctime/mtime for device path for libblkid */
6986 update_dev_time(device_path);
6987}
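
/*
 * For reference (a hedged note, not from the original file): the copies
 * wiped above sit at the fixed offsets returned by btrfs_sb_offset() -
 * the primary at 64KiB and, on sufficiently large devices, mirrors at
 * 64MiB and 256GiB - so zeroing the magic in each copy is enough for
 * probing tools such as libblkid to stop recognizing the device.
 */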
6988
6989/*
6990 * Update the size of all devices, which is used for writing out the
6991 * super blocks.
6992 */
6993void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
6994{
6995 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6996 struct btrfs_device *curr, *next;
6997
6998 if (list_empty(&fs_devices->resized_devices))
6999 return;
7000
7001 mutex_lock(&fs_devices->device_list_mutex);
7002 lock_chunks(fs_info->dev_root);
7003 list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
7004 resized_list) {
7005 list_del_init(&curr->resized_list);
7006 curr->commit_total_bytes = curr->disk_total_bytes;
7007 }
7008 unlock_chunks(fs_info->dev_root);
7009 mutex_unlock(&fs_devices->device_list_mutex);
7010}
7011
7012/* Must be invoked during the transaction commit */
7013void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
7014 struct btrfs_transaction *transaction)
7015{
7016 struct extent_map *em;
7017 struct map_lookup *map;
7018 struct btrfs_device *dev;
7019 int i;
7020
7021 if (list_empty(&transaction->pending_chunks))
7022 return;
7023
7024 /* In order to kick the device replace finish process */
7025 lock_chunks(root);
7026 list_for_each_entry(em, &transaction->pending_chunks, list) {
7027 map = em->map_lookup;
7028
7029 for (i = 0; i < map->num_stripes; i++) {
7030 dev = map->stripes[i].dev;
7031 dev->commit_bytes_used = dev->bytes_used;
7032 }
7033 }
7034 unlock_chunks(root);
7035}
7036
7037void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
7038{
7039 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7040 while (fs_devices) {
7041 fs_devices->fs_info = fs_info;
7042 fs_devices = fs_devices->seed;
7043 }
7044}
7045
7046void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
7047{
7048 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7049 while (fs_devices) {
7050 fs_devices->fs_info = NULL;
7051 fs_devices = fs_devices->seed;
7052 }
7053}
7054
7055static void btrfs_close_one_device(struct btrfs_device *device)
7056{
7057 struct btrfs_fs_devices *fs_devices = device->fs_devices;
7058 struct btrfs_device *new_device;
7059 struct rcu_string *name;
7060
7061 if (device->bdev)
7062 fs_devices->open_devices--;
7063
7064 if (device->writeable &&
7065 device->devid != BTRFS_DEV_REPLACE_DEVID) {
7066 list_del_init(&device->dev_alloc_list);
7067 fs_devices->rw_devices--;
7068 }
7069
7070 if (device->missing)
7071 fs_devices->missing_devices--;
7072
7073 new_device = btrfs_alloc_device(NULL, &device->devid,
7074 device->uuid);
7075 BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
7076
7077 /* Safe because we are under uuid_mutex */
7078 if (device->name) {
7079 name = rcu_string_strdup(device->name->str, GFP_NOFS);
7080 BUG_ON(!name); /* -ENOMEM */
7081 rcu_assign_pointer(new_device->name, name);
7082 }
7083
7084 list_replace_rcu(&device->dev_list, &new_device->dev_list);
7085 new_device->fs_devices = device->fs_devices;
7086
7087 call_rcu(&device->rcu, free_device);
7088}