/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 4,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 3,
	},
};

const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
	[BTRFS_RAID_RAID1]  = BTRFS_BLOCK_GROUP_RAID1,
	[BTRFS_RAID_DUP]    = BTRFS_BLOCK_GROUP_DUP,
	[BTRFS_RAID_RAID0]  = BTRFS_BLOCK_GROUP_RAID0,
	[BTRFS_RAID_SINGLE] = 0,
	[BTRFS_RAID_RAID5]  = BTRFS_BLOCK_GROUP_RAID5,
	[BTRFS_RAID_RAID6]  = BTRFS_BLOCK_GROUP_RAID6,
};

/*
 * Table to convert BTRFS_RAID_* to the error code if minimum number of devices
 * condition is not met. Zero means there's no corresponding
 * BTRFS_ERROR_DEV_*_NOT_MET value.
 */
const int btrfs_raid_mindev_error[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	[BTRFS_RAID_RAID1]  = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	[BTRFS_RAID_DUP]    = 0,
	[BTRFS_RAID_RAID0]  = 0,
	[BTRFS_RAID_SINGLE] = 0,
	[BTRFS_RAID_RAID5]  = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	[BTRFS_RAID_RAID6]  = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
};
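
/*
 * The three tables above are parallel arrays indexed by the BTRFS_RAID_*
 * constants. An illustrative sketch (not kernel code) of how they are meant
 * to be combined, here validating a device count for one profile:
 *
 *	static int profile_mindev_ok(int index, u64 num_devices)
 *	{
 *		if (num_devices < btrfs_raid_array[index].devs_min)
 *			return btrfs_raid_mindev_error[index];
 *		return 0;
 *	}
 *
 * btrfs_check_raid_min_devices() below performs this check for every
 * profile present in the filesystem's allocation bits.
 */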

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

static struct btrfs_fs_devices *__alloc_fs_devices(void)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->resized_devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->list);

	return fs_devs;
}

/**
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:	a pointer to UUID for this FS.  If NULL a new UUID is
 *		generated.
 *
 * Return: a pointer to a new &struct btrfs_fs_devices on success;
 * ERR_PTR() on error.  Returned struct is not linked onto any lists and
 * can be destroyed with kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = __alloc_fs_devices();
	if (IS_ERR(fs_devs))
		return fs_devs;

	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
	else
		generate_random_uuid(fs_devs->fsid);

	return fs_devs;
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}

void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->resized_list);

	spin_lock_init(&dev->io_lock);

	spin_lock_init(&dev->reada_lock);
	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);

	return dev;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, 4096);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*bh)) {
		ret = PTR_ERR(*bh);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}

static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{

	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}
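
/*
 * Usage sketch for requeue_list() (illustrative, not kernel code): given a
 * pending list A -> B and a chain C -> D that must be retried,
 *
 *	struct btrfs_pending_bios p = { .head = A, .tail = B };
 *	requeue_list(&p, C, D);
 *
 * splices the chain in front: p.head == C, D->bi_next == A, and the tail
 * stays B. If the list was empty, C..D becomes the whole list and D is
 * recorded as the new tail.
 */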

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device. This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block. The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested. This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device. We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, set up a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held). But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop. Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		/*
		 * atomic_dec_return implies a barrier for waitqueue_active
		 */
		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur);
		num_run++;
		batch_run++;

		cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested. Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop. So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_queue_work(fs_info->submit_workers,
					 &device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}


void btrfs_free_stale_device(struct btrfs_device *cur_dev)
{
	struct btrfs_fs_devices *fs_devs;
	struct btrfs_device *dev;

	if (!cur_dev->name)
		return;

	list_for_each_entry(fs_devs, &fs_uuids, list) {
		int del = 1;

		if (fs_devs->opened)
			continue;
		if (fs_devs->seeding)
			continue;

		list_for_each_entry(dev, &fs_devs->devices, dev_list) {

			if (dev == cur_dev)
				continue;
			if (!dev->name)
				continue;

			/*
			 * TODO: This won't be enough. What if the same device
			 * comes back (with a new uuid) and with its mapper
			 * path? But for now, this does help, as mostly an
			 * admin will either use the mapper or the non-mapper
			 * path throughout.
			 */
			rcu_read_lock();
			del = strcmp(rcu_str_deref(dev->name),
				     rcu_str_deref(cur_dev->name));
			rcu_read_unlock();
			if (!del)
				break;
		}

		if (!del) {
			/* delete the stale device */
			if (fs_devs->num_devices == 1) {
				btrfs_sysfs_remove_fsid(fs_devs);
				list_del(&fs_devs->list);
				free_fs_devices(fs_devs);
			} else {
				fs_devs->num_devices--;
				list_del(&dev->dev_list);
				rcu_string_free(dev->name);
				kfree(dev);
			}
			break;
		}
	}
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * 1   - first time device is seen
 * 0   - device already known
 * < 0 - error
 */
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	int ret = 0;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);

		list_add(&fs_devices->list, &fs_uuids);

		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}

	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			/* we can safely leave the fs_devices entry around */
			return PTR_ERR(device);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;
		mutex_unlock(&fs_devices->device_list_mutex);

		ret = 1;
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at the time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *	a. The same device disappeared and reappeared with a
		 *	   different name, or
		 *	b. The missing-disk-which-was-replaced has
		 *	   reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further, in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transactions while it was away,
		 * and in case of 2a the stale bdev has to be updated as
		 * well. 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow updates to btrfs_fs_device through
		 * the btrfs dev scan cli after FS has been mounted. We're
		 * still tracking a problem where systems fail mount by
		 * subvolume id when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is, if the FS is _not_ mounted and if you are
			 * here, that means there is more than one disk with
			 * the same uuid and devid. We keep the one with the
			 * larger generation number or the last-in if the
			 * generations are equal.
			 */
			return -EEXIST;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with the largest generation
	 * (as above).
	 */
	if (!fs_devices->opened)
		device->generation = found_transid;

	/*
	 * if there is a new btrfs on an already registered device,
	 * then remove the stale device entry.
	 */
	if (ret > 0)
		btrfs_free_stale_device(device);

	*fs_devices_ret = fs_devices;

	return ret;
}
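
/*
 * Example use of the return contract (a simplified sketch mirroring
 * btrfs_scan_one_device() below): a positive return marks the first time a
 * device is seen, which is the only case worth reporting to the log:
 *
 *	ret = device_list_add(path, disk_super, devid, &fs_devices);
 *	if (ret > 0) {
 *		pr_info("BTRFS: new device %s\n", path);
 *		ret = 0;
 *	}
 */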

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
			if (!name) {
				kfree(device);
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct btrfs_device *latest_dev = NULL;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!device->is_tgtdev_for_dev_replace &&
			    (!latest_dev ||
			     device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || device->is_tgtdev_for_dev_replace) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			if (!device->is_tgtdev_for_dev_replace)
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	rcu_string_free(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;
	struct btrfs_device *new_device;
	struct rcu_string *name;

	if (device->bdev)
		fs_devices->open_devices--;

	if (device->writeable &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->missing)
		fs_devices->missing_devices--;

	if (device->bdev && device->writeable) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	new_device = btrfs_alloc_device(NULL, &device->devid,
					device->uuid);
	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

	/* Safe because we are under uuid_mutex */
	if (device->name) {
		name = rcu_string_strdup(device->name->str, GFP_NOFS);
		BUG_ON(!name); /* -ENOMEM */
		rcu_assign_pointer(new_device->name, name);
	}

	list_replace_rcu(&device->dev_list, &new_device->dev_list);
	new_device->fs_devices = device->fs_devices;

	call_rcu(&device->rcu, free_device);
}

static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_close_one_device(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	/*
	 * Wait for rcu kworkers under __btrfs_close_devices
	 * to finish all blkdev_puts so device is really
	 * free when umount is done.
	 */
	rcu_barrier();
	return ret;
}

static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		/* Just open everything we can; ignore failures here */
		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
					  &bdev, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q))
			device->can_discard = 1;

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable &&
		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
		blkdev_put(bdev, flags);
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

void btrfs_release_disk_super(struct page *page)
{
	kunmap(page);
	put_page(page);
}

int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
		struct page **page, struct btrfs_super_block **disk_super)
{
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return 1;

	/* make sure our super fits in the page */
	if (sizeof(**disk_super) > PAGE_SIZE)
		return 1;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
		return 1;

	/* pull in the page with our super */
	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   index, GFP_KERNEL);

	if (IS_ERR_OR_NULL(*page))
		return 1;

	p = kmap(*page);

	/* align our pointer to the offset of the super block */
	*disk_super = p + (bytenr & ~PAGE_MASK);

	if (btrfs_super_bytenr(*disk_super) != bytenr ||
	    btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(*page);
		return 1;
	}

	if ((*disk_super)->label[0] &&
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1])
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';

	return 0;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via pagecache.
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct page *page;
	int ret = -EINVAL;
	u64 devid;
	u64 transid;
	u64 total_devices;
	u64 bytenr;

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;
	mutex_lock(&uuid_mutex);

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super))
		goto error_bdev_put;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	total_devices = btrfs_super_num_devices(disk_super);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
	if (ret > 0) {
		if (disk_super->label[0]) {
			printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
		} else {
			printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
		}

		printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
		ret = 0;
	}
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;

	btrfs_release_disk_super(page);

error_bdev_put:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}
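
/*
 * Sketch of the overall scan/open lifecycle (illustrative only; the real
 * callers live in the mount path and the device-scan ioctl in super.c):
 *
 *	struct btrfs_fs_devices *fs_devices;
 *	int ret;
 *
 *	ret = btrfs_scan_one_device(path, FMODE_READ, holder, &fs_devices);
 *	if (ret)
 *		return ret;
 *	ret = btrfs_open_devices(fs_devices, FMODE_READ | FMODE_WRITE, holder);
 *	...
 *	btrfs_close_devices(fs_devices);
 *
 * btrfs_open_devices() is reference counted: a second open only bumps
 * fs_devices->opened, and btrfs_close_devices() drops it again.
 */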

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
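
/*
 * Worked example for the overlap arithmetic above (not kernel code):
 * querying [start = 100, end = 199] against a dev extent covering
 * [150, 300) hits the "key.offset > start && key.offset <= end" case,
 * so the loop adds end - key.offset + 1 = 50 bytes and breaks, because
 * that extent already reaches past the end of the range. An extent
 * covering [120, 180) would instead match the third case and contribute
 * extent_end - key.offset = 60 bytes without terminating the walk.
 */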

static int contains_pending_extent(struct btrfs_transaction *transaction,
				   struct btrfs_device *device,
				   u64 *start, u64 len)
{
	struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
	struct extent_map *em;
	struct list_head *search_list = &fs_info->pinned_chunks;
	int ret = 0;
	u64 physical_start = *start;

	if (transaction)
		search_list = &transaction->pending_chunks;
again:
	list_for_each_entry(em, search_list, list) {
		struct map_lookup *map;
		int i;

		map = em->map_lookup;
		for (i = 0; i < map->num_stripes; i++) {
			u64 end;

			if (map->stripes[i].dev != device)
				continue;
			if (map->stripes[i].physical >= physical_start + len ||
			    map->stripes[i].physical + em->orig_block_len <=
			    physical_start)
				continue;
			/*
			 * Make sure that while processing the pinned list we do
			 * not override our *start with a lower value, because
			 * we can have pinned chunks that fall within this
			 * device hole and that have lower physical addresses
			 * than the pending chunks we processed before. If we
			 * do not take this special care we can end up getting
			 * 2 pending chunks that start at the same physical
			 * device offsets because the end offset of a pinned
			 * chunk can be equal to the start offset of some
			 * pending chunk.
			 */
			end = map->stripes[i].physical + em->orig_block_len;
			if (end > *start) {
				*start = end;
				ret = 1;
			}
		}
	}
	if (search_list != &fs_info->pinned_chunks) {
		search_list = &fs_info->pinned_chunks;
		goto again;
	}

	return ret;
}
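
/*
 * Typical caller pattern (a sketch of what find_free_dev_extent_start()
 * does below): since a return of 1 bumps *start past the conflicting
 * pending or pinned chunk, the check is simply retried until the
 * proposed hole is clean:
 *
 *	u64 offset = search_start;
 *
 *	while (contains_pending_extent(transaction, device, &offset, len))
 *		cond_resched();
 */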

/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	the device in which we search for the free space
 * @num_bytes:	the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	store the start of the free space
 * @len:	the size of the free space that we find, or the size
 *		of the max free space if we don't find suitable free space
 *
 * This uses a pretty simple search; the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents.
 *
 * @start is used to store the start of the free space if we find one. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
			       struct btrfs_device *device, u64 num_bytes,
			       u64 search_start, u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;
	u64 min_search_start;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	min_search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
	search_start = max(search_start, min_search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(transaction, device,
						    &search_start,
						    hole_size)) {
				if (key.offset >= search_start) {
					hole_size = key.offset - search_start;
				} else {
					WARN_ON_ONCE(1);
					hole_size = 0;
				}
			}

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * so far, so max_hole_start must point to the start
			 * of this free space and the length of this free
			 * space is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to
			 * the caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (contains_pending_extent(transaction, device, &search_start,
					    hole_size)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(trans->transaction, device,
					  num_bytes, 0, start, len);
}

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(root->fs_info, ret,
				      "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	WARN_ON(device->is_tgtdev_for_dev_replace);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_device(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime-based probes like libblkid.
 */
static void update_dev_time(char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	btrfs_commit_transaction(trans, root);
	return ret;
}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding e.g.
 * device replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
		u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_group[i]))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min) {
			int ret = btrfs_raid_mindev_error[i];

			if (ret)
				return ret;
		}
	}

	return 0;
}
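
/*
 * Example caller (see btrfs_rm_device() below): when removing a single
 * device, validate the count that would remain, after first discounting
 * a running replace target from num_devices:
 *
 *	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
 *	if (ret)
 *		return ret;
 */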

struct btrfs_device *btrfs_find_next_active_device(struct btrfs_fs_devices *fs_devs,
					struct btrfs_device *device)
{
	struct btrfs_device *next_device;

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
		    !next_device->missing && next_device->bdev)
			return next_device;
	}

	return NULL;
}

/*
 * Helper function to check if the given device is part of s_bdev / latest_bdev
 * and replace it with the provided or the next active device. In the context
 * where this function is called, there should always be another device (or
 * this_dev) which is active.
 */
void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
		struct btrfs_device *device, struct btrfs_device *this_dev)
{
	struct btrfs_device *next_device;

	if (this_dev)
		next_device = this_dev;
	else
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
								device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
	    (fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_bdev == device->bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
}

int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	u64 num_devices;
	int ret = 0;
	bool clear_super = false;
	char *dev_name = NULL;

	mutex_lock(&uuid_mutex);

	num_devices = root->fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&root->fs_info->dev_replace, 0);
	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
		WARN_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&root->fs_info->dev_replace, 0);

	ret = btrfs_check_raid_min_devices(root->fs_info, num_devices - 1);
	if (ret)
		goto out;

	ret = btrfs_find_device_by_devspec(root, devid, device_path,
				&device);
	if (ret)
		goto out;

	if (device->is_tgtdev_for_dev_replace) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		unlock_chunks(root);
		dev_name = kstrdup(device->name->str, GFP_KERNEL);
		if (!dev_name) {
			ret = -ENOMEM;
			goto error_undo;
		}
		clear_super = true;
	}

	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root->fs_info, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers. Whoever is writing all supers, should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy). Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;
	device->fs_devices->total_devices--;

	if (device->missing)
		device->fs_devices->missing_devices--;

	btrfs_assign_next_active_device(root->fs_info, device, NULL);

	if (device->bdev) {
		device->fs_devices->open_devices--;
		/* remove sysfs entry */
		btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
	}

	call_rcu(&device->rcu, free_device);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices) {
				fs_devices->seed = cur_devices->seed;
				break;
			}
			fs_devices = fs_devices->seed;
		}
		cur_devices->seed = NULL;
		__btrfs_close_devices(cur_devices);
		free_fs_devices(cur_devices);
	}

	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);

	/*
	 * at this point, the device is zero sized. We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super) {
		struct block_device *bdev;

		bdev = blkdev_get_by_path(dev_name, FMODE_READ | FMODE_EXCL,
						root->fs_info->bdev_holder);
		if (!IS_ERR(bdev)) {
			btrfs_scratch_superblocks(bdev, dev_name);
			blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
		}
	}

out:
	kfree(dev_name);

	mutex_unlock(&uuid_mutex);
	return ret;

error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		device->fs_devices->rw_devices++;
		unlock_chunks(root);
	}
	goto out;
}
1990
1991 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
1992 struct btrfs_device *srcdev)
1993 {
1994 struct btrfs_fs_devices *fs_devices;
1995
1996 WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1997
1998 /*
1999 * in case of fs with no seed, srcdev->fs_devices will point
2000 * to fs_devices of fs_info. However when the dev being replaced is
2001 * a seed dev it will point to the seed's local fs_devices. In short
2002 * srcdev will have its correct fs_devices in both the cases.
2003 */
2004 fs_devices = srcdev->fs_devices;
2005
2006 list_del_rcu(&srcdev->dev_list);
2007 list_del_rcu(&srcdev->dev_alloc_list);
2008 fs_devices->num_devices--;
2009 if (srcdev->missing)
2010 fs_devices->missing_devices--;
2011
2012 if (srcdev->writeable)
2013 fs_devices->rw_devices--;
2014
2015 if (srcdev->bdev)
2016 fs_devices->open_devices--;
2017 }
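/*
 * Simplified sketch of how the srcdev helpers above and below pair up;
 * the real caller is the dev-replace finishing path in dev-replace.c,
 * and the local names here are illustrative:
 *
 *	mutex_lock(&fs_info->fs_devices->device_list_mutex);
 *	btrfs_rm_dev_replace_remove_srcdev(fs_info, src_device);
 *	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 *	...
 *	btrfs_rm_dev_replace_free_srcdev(fs_info, src_device);
 */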
2018
2019 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
2020 struct btrfs_device *srcdev)
2021 {
2022 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2023
2024 if (srcdev->writeable) {
2025 /* zero out the old super if it is writable */
2026 btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
2027 }
2028 call_rcu(&srcdev->rcu, free_device);
2029
2030 /*
2031 * Unless fs_devices belongs to a seed filesystem, num_devices
2032 * should not drop to zero.
2033 */
2034 BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);
2035
2036 /* if there are no devices left, delete the fs_devices */
2037 if (!fs_devices->num_devices) {
2038 struct btrfs_fs_devices *tmp_fs_devices;
2039
2040 tmp_fs_devices = fs_info->fs_devices;
2041 while (tmp_fs_devices) {
2042 if (tmp_fs_devices->seed == fs_devices) {
2043 tmp_fs_devices->seed = fs_devices->seed;
2044 break;
2045 }
2046 tmp_fs_devices = tmp_fs_devices->seed;
2047 }
2048 fs_devices->seed = NULL;
2049 __btrfs_close_devices(fs_devices);
2050 free_fs_devices(fs_devices);
2051 }
2052 }
2053
2054 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
2055 struct btrfs_device *tgtdev)
2056 {
2057 mutex_lock(&uuid_mutex);
2058 WARN_ON(!tgtdev);
2059 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2060
2061 btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev);
2062
2063 if (tgtdev->bdev)
2064 fs_info->fs_devices->open_devices--;
2065
2066 fs_info->fs_devices->num_devices--;
2067
2068 btrfs_assign_next_active_device(fs_info, tgtdev, NULL);
2069
2070 list_del_rcu(&tgtdev->dev_list);
2071
2072 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2073 mutex_unlock(&uuid_mutex);
2074
2075 /*
2076 * The update_dev_time() within btrfs_scratch_superblocks()
2077 * may lead to a call to btrfs_show_devname(), which will try
2078 * to take device_list_mutex. Since this device is already off
2079 * the device list, we don't have to hold the device_list_mutex
2080 * lock here.
2081 */
2082 btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
2083 call_rcu(&tgtdev->rcu, free_device);
2084 }
2085
2086 static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
2087 struct btrfs_device **device)
2088 {
2089 int ret = 0;
2090 struct btrfs_super_block *disk_super;
2091 u64 devid;
2092 u8 *dev_uuid;
2093 struct block_device *bdev;
2094 struct buffer_head *bh;
2095
2096 *device = NULL;
2097 ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2098 root->fs_info->bdev_holder, 0, &bdev, &bh);
2099 if (ret)
2100 return ret;
2101 disk_super = (struct btrfs_super_block *)bh->b_data;
2102 devid = btrfs_stack_device_id(&disk_super->dev_item);
2103 dev_uuid = disk_super->dev_item.uuid;
2104 *device = btrfs_find_device(root->fs_info, devid, dev_uuid,
2105 disk_super->fsid);
2106 brelse(bh);
2107 if (!*device)
2108 ret = -ENOENT;
2109 blkdev_put(bdev, FMODE_READ);
2110 return ret;
2111 }
2112
2113 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
2114 char *device_path,
2115 struct btrfs_device **device)
2116 {
2117 *device = NULL;
2118 if (strcmp(device_path, "missing") == 0) {
2119 struct list_head *devices;
2120 struct btrfs_device *tmp;
2121
2122 devices = &root->fs_info->fs_devices->devices;
2123 /*
2124 * It is safe to read the devices since the volume_mutex
2125 * is held by the caller.
2126 */
2127 list_for_each_entry(tmp, devices, dev_list) {
2128 if (tmp->in_fs_metadata && !tmp->bdev) {
2129 *device = tmp;
2130 break;
2131 }
2132 }
2133
2134 if (!*device)
2135 return BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2136
2137 return 0;
2138 } else {
2139 return btrfs_find_device_by_path(root, device_path, device);
2140 }
2141 }
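/*
 * Example (illustrative) of the two lookup styles accepted above:
 *
 *	struct btrfs_device *dev;
 *	int ret;
 *
 *	ret = btrfs_find_device_missing_or_by_path(root, "missing", &dev);
 *	ret = btrfs_find_device_missing_or_by_path(root, "/dev/sdb", &dev);
 *
 * "missing" selects the first device that is in the FS metadata but
 * has no backing bdev; any other string is treated as a device path.
 */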
2142
2143 /*
2144 * Look up a device by device id, or by path if the id is 0.
2145 */
2146 int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid,
2147 char *devpath,
2148 struct btrfs_device **device)
2149 {
2150 int ret;
2151
2152 if (devid) {
2153 ret = 0;
2154 *device = btrfs_find_device(root->fs_info, devid, NULL,
2155 NULL);
2156 if (!*device)
2157 ret = -ENOENT;
2158 } else {
2159 if (!devpath || !devpath[0])
2160 return -EINVAL;
2161
2162 ret = btrfs_find_device_missing_or_by_path(root, devpath,
2163 device);
2164 }
2165 return ret;
2166 }
2167
2168 /*
2169 * Does all the dirty work required to change the filesystem's UUID.
2170 */
2171 static int btrfs_prepare_sprout(struct btrfs_root *root)
2172 {
2173 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2174 struct btrfs_fs_devices *old_devices;
2175 struct btrfs_fs_devices *seed_devices;
2176 struct btrfs_super_block *disk_super = root->fs_info->super_copy;
2177 struct btrfs_device *device;
2178 u64 super_flags;
2179
2180 BUG_ON(!mutex_is_locked(&uuid_mutex));
2181 if (!fs_devices->seeding)
2182 return -EINVAL;
2183
2184 seed_devices = __alloc_fs_devices();
2185 if (IS_ERR(seed_devices))
2186 return PTR_ERR(seed_devices);
2187
2188 old_devices = clone_fs_devices(fs_devices);
2189 if (IS_ERR(old_devices)) {
2190 kfree(seed_devices);
2191 return PTR_ERR(old_devices);
2192 }
2193
2194 list_add(&old_devices->list, &fs_uuids);
2195
2196 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2197 seed_devices->opened = 1;
2198 INIT_LIST_HEAD(&seed_devices->devices);
2199 INIT_LIST_HEAD(&seed_devices->alloc_list);
2200 mutex_init(&seed_devices->device_list_mutex);
2201
2202 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2203 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2204 synchronize_rcu);
2205 list_for_each_entry(device, &seed_devices->devices, dev_list)
2206 device->fs_devices = seed_devices;
2207
2208 lock_chunks(root);
2209 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
2210 unlock_chunks(root);
2211
2212 fs_devices->seeding = 0;
2213 fs_devices->num_devices = 0;
2214 fs_devices->open_devices = 0;
2215 fs_devices->missing_devices = 0;
2216 fs_devices->rotating = 0;
2217 fs_devices->seed = seed_devices;
2218
2219 generate_random_uuid(fs_devices->fsid);
2220 memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2221 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2222 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2223
2224 super_flags = btrfs_super_flags(disk_super) &
2225 ~BTRFS_SUPER_FLAG_SEEDING;
2226 btrfs_set_super_flags(disk_super, super_flags);
2227
2228 return 0;
2229 }
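/*
 * Rough picture (illustrative) of what btrfs_prepare_sprout() above
 * does to the fs_devices chain:
 *
 *	before:	fs_devices (seeding, old fsid) owns all devices
 *	after:	fs_devices (rw, fresh fsid, empty device lists)
 *		  ->seed = seed_devices (snapshot of the old state,
 *					 now owning all the devices)
 *
 * old_devices, a clone keyed by the old fsid, stays on fs_uuids so the
 * original seed filesystem can still be found by its fsid.
 */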
2230
2231 /*
2232 * Store the expected generation for seed devices in device items.
2233 */
2234 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
2235 struct btrfs_root *root)
2236 {
2237 struct btrfs_path *path;
2238 struct extent_buffer *leaf;
2239 struct btrfs_dev_item *dev_item;
2240 struct btrfs_device *device;
2241 struct btrfs_key key;
2242 u8 fs_uuid[BTRFS_UUID_SIZE];
2243 u8 dev_uuid[BTRFS_UUID_SIZE];
2244 u64 devid;
2245 int ret;
2246
2247 path = btrfs_alloc_path();
2248 if (!path)
2249 return -ENOMEM;
2250
2251 root = root->fs_info->chunk_root;
2252 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2253 key.offset = 0;
2254 key.type = BTRFS_DEV_ITEM_KEY;
2255
2256 while (1) {
2257 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2258 if (ret < 0)
2259 goto error;
2260
2261 leaf = path->nodes[0];
2262 next_slot:
2263 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2264 ret = btrfs_next_leaf(root, path);
2265 if (ret > 0)
2266 break;
2267 if (ret < 0)
2268 goto error;
2269 leaf = path->nodes[0];
2270 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2271 btrfs_release_path(path);
2272 continue;
2273 }
2274
2275 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2276 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2277 key.type != BTRFS_DEV_ITEM_KEY)
2278 break;
2279
2280 dev_item = btrfs_item_ptr(leaf, path->slots[0],
2281 struct btrfs_dev_item);
2282 devid = btrfs_device_id(leaf, dev_item);
2283 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2284 BTRFS_UUID_SIZE);
2285 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2286 BTRFS_UUID_SIZE);
2287 device = btrfs_find_device(root->fs_info, devid, dev_uuid,
2288 fs_uuid);
2289 BUG_ON(!device); /* Logic error */
2290
2291 if (device->fs_devices->seeding) {
2292 btrfs_set_device_generation(leaf, dev_item,
2293 device->generation);
2294 btrfs_mark_buffer_dirty(leaf);
2295 }
2296
2297 path->slots[0]++;
2298 goto next_slot;
2299 }
2300 ret = 0;
2301 error:
2302 btrfs_free_path(path);
2303 return ret;
2304 }
2305
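/*
 * Add a new device to a mounted filesystem. In outline: open the block
 * device exclusively, reject duplicates, allocate and fill a
 * btrfs_device, sprout first if this is a seed filesystem, wire the
 * device into fs_devices and the superblock counters, persist the dev
 * item and commit the transaction.
 */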
2306 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
2307 {
2308 struct request_queue *q;
2309 struct btrfs_trans_handle *trans;
2310 struct btrfs_device *device;
2311 struct block_device *bdev;
2312 struct list_head *devices;
2313 struct super_block *sb = root->fs_info->sb;
2314 struct rcu_string *name;
2315 u64 tmp;
2316 int seeding_dev = 0;
2317 int ret = 0;
2318
2319 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
2320 return -EROFS;
2321
2322 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2323 root->fs_info->bdev_holder);
2324 if (IS_ERR(bdev))
2325 return PTR_ERR(bdev);
2326
2327 if (root->fs_info->fs_devices->seeding) {
2328 seeding_dev = 1;
2329 down_write(&sb->s_umount);
2330 mutex_lock(&uuid_mutex);
2331 }
2332
2333 filemap_write_and_wait(bdev->bd_inode->i_mapping);
2334
2335 devices = &root->fs_info->fs_devices->devices;
2336
2337 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2338 list_for_each_entry(device, devices, dev_list) {
2339 if (device->bdev == bdev) {
2340 ret = -EEXIST;
2341 mutex_unlock(
2342 &root->fs_info->fs_devices->device_list_mutex);
2343 goto error;
2344 }
2345 }
2346 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2347
2348 device = btrfs_alloc_device(root->fs_info, NULL, NULL);
2349 if (IS_ERR(device)) {
2350 /* we can safely leave the fs_devices entry around */
2351 ret = PTR_ERR(device);
2352 goto error;
2353 }
2354
2355 name = rcu_string_strdup(device_path, GFP_KERNEL);
2356 if (!name) {
2357 kfree(device);
2358 ret = -ENOMEM;
2359 goto error;
2360 }
2361 rcu_assign_pointer(device->name, name);
2362
2363 trans = btrfs_start_transaction(root, 0);
2364 if (IS_ERR(trans)) {
2365 rcu_string_free(device->name);
2366 kfree(device);
2367 ret = PTR_ERR(trans);
2368 goto error;
2369 }
2370
2371 q = bdev_get_queue(bdev);
2372 if (blk_queue_discard(q))
2373 device->can_discard = 1;
2374 device->writeable = 1;
2375 device->generation = trans->transid;
2376 device->io_width = root->sectorsize;
2377 device->io_align = root->sectorsize;
2378 device->sector_size = root->sectorsize;
2379 device->total_bytes = i_size_read(bdev->bd_inode);
2380 device->disk_total_bytes = device->total_bytes;
2381 device->commit_total_bytes = device->total_bytes;
2382 device->dev_root = root->fs_info->dev_root;
2383 device->bdev = bdev;
2384 device->in_fs_metadata = 1;
2385 device->is_tgtdev_for_dev_replace = 0;
2386 device->mode = FMODE_EXCL;
2387 device->dev_stats_valid = 1;
2388 set_blocksize(device->bdev, 4096);
2389
2390 if (seeding_dev) {
2391 sb->s_flags &= ~MS_RDONLY;
2392 ret = btrfs_prepare_sprout(root);
2393 BUG_ON(ret); /* -ENOMEM */
2394 }
2395
2396 device->fs_devices = root->fs_info->fs_devices;
2397
2398 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2399 lock_chunks(root);
2400 list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
2401 list_add(&device->dev_alloc_list,
2402 &root->fs_info->fs_devices->alloc_list);
2403 root->fs_info->fs_devices->num_devices++;
2404 root->fs_info->fs_devices->open_devices++;
2405 root->fs_info->fs_devices->rw_devices++;
2406 root->fs_info->fs_devices->total_devices++;
2407 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2408
2409 spin_lock(&root->fs_info->free_chunk_lock);
2410 root->fs_info->free_chunk_space += device->total_bytes;
2411 spin_unlock(&root->fs_info->free_chunk_lock);
2412
2413 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2414 root->fs_info->fs_devices->rotating = 1;
2415
2416 tmp = btrfs_super_total_bytes(root->fs_info->super_copy);
2417 btrfs_set_super_total_bytes(root->fs_info->super_copy,
2418 tmp + device->total_bytes);
2419
2420 tmp = btrfs_super_num_devices(root->fs_info->super_copy);
2421 btrfs_set_super_num_devices(root->fs_info->super_copy,
2422 tmp + 1);
2423
2424 /* add sysfs device entry */
2425 btrfs_sysfs_add_device_link(root->fs_info->fs_devices, device);
2426
2427 /*
2428 * We've got more storage; clear any full flags on the
2429 * space infos.
2430 */
2431 btrfs_clear_space_info_full(root->fs_info);
2432
2433 unlock_chunks(root);
2434 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2435
2436 if (seeding_dev) {
2437 lock_chunks(root);
2438 ret = init_first_rw_device(trans, root, device);
2439 unlock_chunks(root);
2440 if (ret) {
2441 btrfs_abort_transaction(trans, ret);
2442 goto error_trans;
2443 }
2444 }
2445
2446 ret = btrfs_add_device(trans, root, device);
2447 if (ret) {
2448 btrfs_abort_transaction(trans, ret);
2449 goto error_trans;
2450 }
2451
2452 if (seeding_dev) {
2453 char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
2454
2455 ret = btrfs_finish_sprout(trans, root);
2456 if (ret) {
2457 btrfs_abort_transaction(trans, ret);
2458 goto error_trans;
2459 }
2460
2461 /* Sprouting changes the fsid of the mounted root,
2462 * so rename the fsid in sysfs.
2463 */
2464 snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
2465 root->fs_info->fsid);
2466 if (kobject_rename(&root->fs_info->fs_devices->fsid_kobj,
2467 fsid_buf))
2468 btrfs_warn(root->fs_info,
2469 "sysfs: failed to create fsid for sprout");
2470 }
2471
2472 root->fs_info->num_tolerated_disk_barrier_failures =
2473 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2474 ret = btrfs_commit_transaction(trans, root);
2475
2476 if (seeding_dev) {
2477 mutex_unlock(&uuid_mutex);
2478 up_write(&sb->s_umount);
2479
2480 if (ret) /* transaction commit */
2481 return ret;
2482
2483 ret = btrfs_relocate_sys_chunks(root);
2484 if (ret < 0)
2485 btrfs_handle_fs_error(root->fs_info, ret,
2486 "Failed to relocate sys chunks after "
2487 "device initialization. This can be fixed "
2488 "using the \"btrfs balance\" command.");
2489 trans = btrfs_attach_transaction(root);
2490 if (IS_ERR(trans)) {
2491 if (PTR_ERR(trans) == -ENOENT)
2492 return 0;
2493 return PTR_ERR(trans);
2494 }
2495 ret = btrfs_commit_transaction(trans, root);
2496 }
2497
2498 /* Update ctime/mtime for libblkid */
2499 update_dev_time(device_path);
2500 return ret;
2501
2502 error_trans:
2503 btrfs_end_transaction(trans, root);
2504 rcu_string_free(device->name);
2505 btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
2506 kfree(device);
2507 error:
2508 blkdev_put(bdev, FMODE_EXCL);
2509 if (seeding_dev) {
2510 mutex_unlock(&uuid_mutex);
2511 up_write(&sb->s_umount);
2512 }
2513 return ret;
2514 }
2515
2516 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2517 struct btrfs_device *srcdev,
2518 struct btrfs_device **device_out)
2519 {
2520 struct request_queue *q;
2521 struct btrfs_device *device;
2522 struct block_device *bdev;
2523 struct btrfs_fs_info *fs_info = root->fs_info;
2524 struct list_head *devices;
2525 struct rcu_string *name;
2526 u64 devid = BTRFS_DEV_REPLACE_DEVID;
2527 int ret = 0;
2528
2529 *device_out = NULL;
2530 if (fs_info->fs_devices->seeding) {
2531 btrfs_err(fs_info, "the filesystem is a seed filesystem!");
2532 return -EINVAL;
2533 }
2534
2535 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2536 fs_info->bdev_holder);
2537 if (IS_ERR(bdev)) {
2538 btrfs_err(fs_info, "target device %s is invalid!", device_path);
2539 return PTR_ERR(bdev);
2540 }
2541
2542 filemap_write_and_wait(bdev->bd_inode->i_mapping);
2543
2544 devices = &fs_info->fs_devices->devices;
2545 list_for_each_entry(device, devices, dev_list) {
2546 if (device->bdev == bdev) {
2547 btrfs_err(fs_info, "target device is in the filesystem!");
2548 ret = -EEXIST;
2549 goto error;
2550 }
2551 }
2552
2553
2554 if (i_size_read(bdev->bd_inode) <
2555 btrfs_device_get_total_bytes(srcdev)) {
2556 btrfs_err(fs_info, "target device is smaller than source device!");
2557 ret = -EINVAL;
2558 goto error;
2559 }
2560
2561
2562 device = btrfs_alloc_device(NULL, &devid, NULL);
2563 if (IS_ERR(device)) {
2564 ret = PTR_ERR(device);
2565 goto error;
2566 }
2567
2568 name = rcu_string_strdup(device_path, GFP_NOFS);
2569 if (!name) {
2570 kfree(device);
2571 ret = -ENOMEM;
2572 goto error;
2573 }
2574 rcu_assign_pointer(device->name, name);
2575
2576 q = bdev_get_queue(bdev);
2577 if (blk_queue_discard(q))
2578 device->can_discard = 1;
2579 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2580 device->writeable = 1;
2581 device->generation = 0;
2582 device->io_width = root->sectorsize;
2583 device->io_align = root->sectorsize;
2584 device->sector_size = root->sectorsize;
2585 device->total_bytes = btrfs_device_get_total_bytes(srcdev);
2586 device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
2587 device->bytes_used = btrfs_device_get_bytes_used(srcdev);
2588 ASSERT(list_empty(&srcdev->resized_list));
2589 device->commit_total_bytes = srcdev->commit_total_bytes;
2590 device->commit_bytes_used = device->bytes_used;
2591 device->dev_root = fs_info->dev_root;
2592 device->bdev = bdev;
2593 device->in_fs_metadata = 1;
2594 device->is_tgtdev_for_dev_replace = 1;
2595 device->mode = FMODE_EXCL;
2596 device->dev_stats_valid = 1;
2597 set_blocksize(device->bdev, 4096);
2598 device->fs_devices = fs_info->fs_devices;
2599 list_add(&device->dev_list, &fs_info->fs_devices->devices);
2600 fs_info->fs_devices->num_devices++;
2601 fs_info->fs_devices->open_devices++;
2602 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2603
2604 *device_out = device;
2605 return ret;
2606
2607 error:
2608 blkdev_put(bdev, FMODE_EXCL);
2609 return ret;
2610 }
2611
2612 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2613 struct btrfs_device *tgtdev)
2614 {
2615 WARN_ON(fs_info->fs_devices->rw_devices == 0);
2616 tgtdev->io_width = fs_info->dev_root->sectorsize;
2617 tgtdev->io_align = fs_info->dev_root->sectorsize;
2618 tgtdev->sector_size = fs_info->dev_root->sectorsize;
2619 tgtdev->dev_root = fs_info->dev_root;
2620 tgtdev->in_fs_metadata = 1;
2621 }
2622
2623 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2624 struct btrfs_device *device)
2625 {
2626 int ret;
2627 struct btrfs_path *path;
2628 struct btrfs_root *root;
2629 struct btrfs_dev_item *dev_item;
2630 struct extent_buffer *leaf;
2631 struct btrfs_key key;
2632
2633 root = device->dev_root->fs_info->chunk_root;
2634
2635 path = btrfs_alloc_path();
2636 if (!path)
2637 return -ENOMEM;
2638
2639 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2640 key.type = BTRFS_DEV_ITEM_KEY;
2641 key.offset = device->devid;
2642
2643 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2644 if (ret < 0)
2645 goto out;
2646
2647 if (ret > 0) {
2648 ret = -ENOENT;
2649 goto out;
2650 }
2651
2652 leaf = path->nodes[0];
2653 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2654
2655 btrfs_set_device_id(leaf, dev_item, device->devid);
2656 btrfs_set_device_type(leaf, dev_item, device->type);
2657 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2658 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2659 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2660 btrfs_set_device_total_bytes(leaf, dev_item,
2661 btrfs_device_get_disk_total_bytes(device));
2662 btrfs_set_device_bytes_used(leaf, dev_item,
2663 btrfs_device_get_bytes_used(device));
2664 btrfs_mark_buffer_dirty(leaf);
2665
2666 out:
2667 btrfs_free_path(path);
2668 return ret;
2669 }
2670
2671 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2672 struct btrfs_device *device, u64 new_size)
2673 {
2674 struct btrfs_super_block *super_copy =
2675 device->dev_root->fs_info->super_copy;
2676 struct btrfs_fs_devices *fs_devices;
2677 u64 old_total;
2678 u64 diff;
2679
2680 if (!device->writeable)
2681 return -EACCES;
2682
2683 lock_chunks(device->dev_root);
2684 old_total = btrfs_super_total_bytes(super_copy);
2685 diff = new_size - device->total_bytes;
2686
2687 if (new_size <= device->total_bytes ||
2688 device->is_tgtdev_for_dev_replace) {
2689 unlock_chunks(device->dev_root);
2690 return -EINVAL;
2691 }
2692
2693 fs_devices = device->dev_root->fs_info->fs_devices;
2694
2695 btrfs_set_super_total_bytes(super_copy, old_total + diff);
2696 device->fs_devices->total_rw_bytes += diff;
2697
2698 btrfs_device_set_total_bytes(device, new_size);
2699 btrfs_device_set_disk_total_bytes(device, new_size);
2700 btrfs_clear_space_info_full(device->dev_root->fs_info);
2701 if (list_empty(&device->resized_list))
2702 list_add_tail(&device->resized_list,
2703 &fs_devices->resized_devices);
2704 unlock_chunks(device->dev_root);
2705
2706 return btrfs_update_device(trans, device);
2707 }
2708
2709 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2710 struct btrfs_root *root, u64 chunk_objectid,
2711 u64 chunk_offset)
2712 {
2713 int ret;
2714 struct btrfs_path *path;
2715 struct btrfs_key key;
2716
2717 root = root->fs_info->chunk_root;
2718 path = btrfs_alloc_path();
2719 if (!path)
2720 return -ENOMEM;
2721
2722 key.objectid = chunk_objectid;
2723 key.offset = chunk_offset;
2724 key.type = BTRFS_CHUNK_ITEM_KEY;
2725
2726 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2727 if (ret < 0)
2728 goto out;
2729 else if (ret > 0) { /* Logic error or corruption */
2730 btrfs_handle_fs_error(root->fs_info, -ENOENT,
2731 "Failed lookup while freeing chunk.");
2732 ret = -ENOENT;
2733 goto out;
2734 }
2735
2736 ret = btrfs_del_item(trans, root, path);
2737 if (ret < 0)
2738 btrfs_handle_fs_error(root->fs_info, ret,
2739 "Failed to delete chunk item.");
2740 out:
2741 btrfs_free_path(path);
2742 return ret;
2743 }
2744
2745 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
2746 chunk_offset)
2747 {
2748 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2749 struct btrfs_disk_key *disk_key;
2750 struct btrfs_chunk *chunk;
2751 u8 *ptr;
2752 int ret = 0;
2753 u32 num_stripes;
2754 u32 array_size;
2755 u32 len = 0;
2756 u32 cur;
2757 struct btrfs_key key;
2758
2759 lock_chunks(root);
2760 array_size = btrfs_super_sys_array_size(super_copy);
2761
2762 ptr = super_copy->sys_chunk_array;
2763 cur = 0;
2764
2765 while (cur < array_size) {
2766 disk_key = (struct btrfs_disk_key *)ptr;
2767 btrfs_disk_key_to_cpu(&key, disk_key);
2768
2769 len = sizeof(*disk_key);
2770
2771 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2772 chunk = (struct btrfs_chunk *)(ptr + len);
2773 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2774 len += btrfs_chunk_item_size(num_stripes);
2775 } else {
2776 ret = -EIO;
2777 break;
2778 }
2779 if (key.objectid == chunk_objectid &&
2780 key.offset == chunk_offset) {
2781 memmove(ptr, ptr + len, array_size - (cur + len));
2782 array_size -= len;
2783 btrfs_set_super_sys_array_size(super_copy, array_size);
2784 } else {
2785 ptr += len;
2786 cur += len;
2787 }
2788 }
2789 unlock_chunks(root);
2790 return ret;
2791 }
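/*
 * Layout of super_copy->sys_chunk_array as walked by the function
 * above (sketch):
 *
 *	[disk_key][chunk + N stripes][disk_key][chunk + M stripes]...
 *
 * Each entry is sizeof(struct btrfs_disk_key) followed by
 * btrfs_chunk_item_size(num_stripes) bytes; deleting an entry is one
 * memmove() of everything behind it plus shrinking sys_array_size in
 * the super block.
 */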
2792
2793 int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
2794 struct btrfs_root *root, u64 chunk_offset)
2795 {
2796 struct extent_map_tree *em_tree;
2797 struct extent_map *em;
2798 struct btrfs_root *extent_root = root->fs_info->extent_root;
2799 struct map_lookup *map;
2800 u64 dev_extent_len = 0;
2801 u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2802 int i, ret = 0;
2803 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2804
2805 /* Just in case */
2806 root = root->fs_info->chunk_root;
2807 em_tree = &root->fs_info->mapping_tree.map_tree;
2808
2809 read_lock(&em_tree->lock);
2810 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2811 read_unlock(&em_tree->lock);
2812
2813 if (!em || em->start > chunk_offset ||
2814 em->start + em->len < chunk_offset) {
2815 /*
2816 * This is a logic error, but we don't want to just rely on the
2817 * user having built with ASSERT enabled, so if ASSERT doesn't
2818 * do anything we still error out.
2819 */
2820 ASSERT(0);
2821 if (em)
2822 free_extent_map(em);
2823 return -EINVAL;
2824 }
2825 map = em->map_lookup;
2826 lock_chunks(root->fs_info->chunk_root);
2827 check_system_chunk(trans, extent_root, map->type);
2828 unlock_chunks(root->fs_info->chunk_root);
2829
2830 /*
2831 * Take the device list mutex to prevent races with the final phase of
2832 * a device replace operation that replaces the device object associated
2833 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
2834 */
2835 mutex_lock(&fs_devices->device_list_mutex);
2836 for (i = 0; i < map->num_stripes; i++) {
2837 struct btrfs_device *device = map->stripes[i].dev;
2838 ret = btrfs_free_dev_extent(trans, device,
2839 map->stripes[i].physical,
2840 &dev_extent_len);
2841 if (ret) {
2842 mutex_unlock(&fs_devices->device_list_mutex);
2843 btrfs_abort_transaction(trans, ret);
2844 goto out;
2845 }
2846
2847 if (device->bytes_used > 0) {
2848 lock_chunks(root);
2849 btrfs_device_set_bytes_used(device,
2850 device->bytes_used - dev_extent_len);
2851 spin_lock(&root->fs_info->free_chunk_lock);
2852 root->fs_info->free_chunk_space += dev_extent_len;
2853 spin_unlock(&root->fs_info->free_chunk_lock);
2854 btrfs_clear_space_info_full(root->fs_info);
2855 unlock_chunks(root);
2856 }
2857
2858 if (map->stripes[i].dev) {
2859 ret = btrfs_update_device(trans, map->stripes[i].dev);
2860 if (ret) {
2861 mutex_unlock(&fs_devices->device_list_mutex);
2862 btrfs_abort_transaction(trans, ret);
2863 goto out;
2864 }
2865 }
2866 }
2867 mutex_unlock(&fs_devices->device_list_mutex);
2868
2869 ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
2870 if (ret) {
2871 btrfs_abort_transaction(trans, ret);
2872 goto out;
2873 }
2874
2875 trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2876
2877 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2878 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2879 if (ret) {
2880 btrfs_abort_transaction(trans, ret);
2881 goto out;
2882 }
2883 }
2884
2885 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em);
2886 if (ret) {
2887 btrfs_abort_transaction(trans, ret);
2888 goto out;
2889 }
2890
2891 out:
2892 /* once for us */
2893 free_extent_map(em);
2894 return ret;
2895 }
2896
2897 static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
2898 {
2899 struct btrfs_root *extent_root;
2900 struct btrfs_trans_handle *trans;
2901 int ret;
2902
2903 root = root->fs_info->chunk_root;
2904 extent_root = root->fs_info->extent_root;
2905
2906 /*
2907 * Prevent races with automatic removal of unused block groups.
2908 * After we relocate and before we remove the chunk with offset
2909 * chunk_offset, automatic removal of the block group can kick in,
2910 * resulting in a failure when calling btrfs_remove_chunk() below.
2911 *
2912 * Make sure to acquire this mutex before doing a tree search (dev
2913 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
2914 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
2915 * we release the path used to search the chunk/dev tree and before
2916 * the current task acquires this mutex and calls us.
2917 */
2918 ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));
2919
2920 ret = btrfs_can_relocate(extent_root, chunk_offset);
2921 if (ret)
2922 return -ENOSPC;
2923
2924 /* step one, relocate all the extents inside this chunk */
2925 btrfs_scrub_pause(root);
2926 ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2927 btrfs_scrub_continue(root);
2928 if (ret)
2929 return ret;
2930
2931 trans = btrfs_start_trans_remove_block_group(root->fs_info,
2932 chunk_offset);
2933 if (IS_ERR(trans)) {
2934 ret = PTR_ERR(trans);
2935 btrfs_handle_fs_error(root->fs_info, ret, NULL);
2936 return ret;
2937 }
2938
2939 /*
2940 * step two, delete the device extents and the
2941 * chunk tree entries
2942 */
2943 ret = btrfs_remove_chunk(trans, root, chunk_offset);
2944 btrfs_end_transaction(trans, extent_root);
2945 return ret;
2946 }
2947
2948 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2949 {
2950 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2951 struct btrfs_path *path;
2952 struct extent_buffer *leaf;
2953 struct btrfs_chunk *chunk;
2954 struct btrfs_key key;
2955 struct btrfs_key found_key;
2956 u64 chunk_type;
2957 bool retried = false;
2958 int failed = 0;
2959 int ret;
2960
2961 path = btrfs_alloc_path();
2962 if (!path)
2963 return -ENOMEM;
2964
2965 again:
2966 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2967 key.offset = (u64)-1;
2968 key.type = BTRFS_CHUNK_ITEM_KEY;
2969
2970 while (1) {
2971 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
2972 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2973 if (ret < 0) {
2974 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2975 goto error;
2976 }
2977 BUG_ON(ret == 0); /* Corruption */
2978
2979 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2980 key.type);
2981 if (ret)
2982 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2983 if (ret < 0)
2984 goto error;
2985 if (ret > 0)
2986 break;
2987
2988 leaf = path->nodes[0];
2989 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2990
2991 chunk = btrfs_item_ptr(leaf, path->slots[0],
2992 struct btrfs_chunk);
2993 chunk_type = btrfs_chunk_type(leaf, chunk);
2994 btrfs_release_path(path);
2995
2996 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2997 ret = btrfs_relocate_chunk(chunk_root,
2998 found_key.offset);
2999 if (ret == -ENOSPC)
3000 failed++;
3001 else
3002 BUG_ON(ret);
3003 }
3004 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
3005
3006 if (found_key.offset == 0)
3007 break;
3008 key.offset = found_key.offset - 1;
3009 }
3010 ret = 0;
3011 if (failed && !retried) {
3012 failed = 0;
3013 retried = true;
3014 goto again;
3015 } else if (WARN_ON(failed && retried)) {
3016 ret = -ENOSPC;
3017 }
3018 error:
3019 btrfs_free_path(path);
3020 return ret;
3021 }
3022
3023 static int insert_balance_item(struct btrfs_root *root,
3024 struct btrfs_balance_control *bctl)
3025 {
3026 struct btrfs_trans_handle *trans;
3027 struct btrfs_balance_item *item;
3028 struct btrfs_disk_balance_args disk_bargs;
3029 struct btrfs_path *path;
3030 struct extent_buffer *leaf;
3031 struct btrfs_key key;
3032 int ret, err;
3033
3034 path = btrfs_alloc_path();
3035 if (!path)
3036 return -ENOMEM;
3037
3038 trans = btrfs_start_transaction(root, 0);
3039 if (IS_ERR(trans)) {
3040 btrfs_free_path(path);
3041 return PTR_ERR(trans);
3042 }
3043
3044 key.objectid = BTRFS_BALANCE_OBJECTID;
3045 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3046 key.offset = 0;
3047
3048 ret = btrfs_insert_empty_item(trans, root, path, &key,
3049 sizeof(*item));
3050 if (ret)
3051 goto out;
3052
3053 leaf = path->nodes[0];
3054 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3055
3056 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
3057
3058 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3059 btrfs_set_balance_data(leaf, item, &disk_bargs);
3060 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3061 btrfs_set_balance_meta(leaf, item, &disk_bargs);
3062 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3063 btrfs_set_balance_sys(leaf, item, &disk_bargs);
3064
3065 btrfs_set_balance_flags(leaf, item, bctl->flags);
3066
3067 btrfs_mark_buffer_dirty(leaf);
3068 out:
3069 btrfs_free_path(path);
3070 err = btrfs_commit_transaction(trans, root);
3071 if (err && !ret)
3072 ret = err;
3073 return ret;
3074 }
3075
3076 static int del_balance_item(struct btrfs_root *root)
3077 {
3078 struct btrfs_trans_handle *trans;
3079 struct btrfs_path *path;
3080 struct btrfs_key key;
3081 int ret, err;
3082
3083 path = btrfs_alloc_path();
3084 if (!path)
3085 return -ENOMEM;
3086
3087 trans = btrfs_start_transaction(root, 0);
3088 if (IS_ERR(trans)) {
3089 btrfs_free_path(path);
3090 return PTR_ERR(trans);
3091 }
3092
3093 key.objectid = BTRFS_BALANCE_OBJECTID;
3094 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3095 key.offset = 0;
3096
3097 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3098 if (ret < 0)
3099 goto out;
3100 if (ret > 0) {
3101 ret = -ENOENT;
3102 goto out;
3103 }
3104
3105 ret = btrfs_del_item(trans, root, path);
3106 out:
3107 btrfs_free_path(path);
3108 err = btrfs_commit_transaction(trans, root);
3109 if (err && !ret)
3110 ret = err;
3111 return ret;
3112 }
3113
3114 /*
3115 * This is a heuristic used to reduce the number of chunks balanced on
3116 * resume after balance was interrupted.
3117 */
3118 static void update_balance_args(struct btrfs_balance_control *bctl)
3119 {
3120 /*
3121 * Turn on soft mode for chunk types that were being converted.
3122 */
3123 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3124 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3125 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3126 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3127 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3128 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3129
3130 /*
3131 * Turn on the usage filter if it is not already in use. The idea is
3132 * that chunks that we have already balanced should be
3133 * reasonably full. Don't do it for chunks that are being
3134 * converted - that will keep us from relocating unconverted
3135 * (albeit full) chunks.
3136 */
3137 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3138 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3139 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3140 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3141 bctl->data.usage = 90;
3142 }
3143 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3144 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3145 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3146 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3147 bctl->sys.usage = 90;
3148 }
3149 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3150 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3151 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3152 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3153 bctl->meta.usage = 90;
3154 }
3155 }
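/*
 * Example (illustrative) of the effect on resume: a balance started as
 * "convert data to raid1" and then interrupted continues as if it had
 * been given "convert=raid1,soft" for data plus "usage=90" for the
 * chunk types that were not being converted, so chunks already in the
 * target profile or already reasonably full are not shuffled again.
 */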
3156
3157 /*
3158 * Should be called with both balance and volume mutexes held to
3159 * serialize other volume operations (add_dev/rm_dev/resize) with
3160 * the restriper. The same goes for unset_balance_control.
3161 */
3162 static void set_balance_control(struct btrfs_balance_control *bctl)
3163 {
3164 struct btrfs_fs_info *fs_info = bctl->fs_info;
3165
3166 BUG_ON(fs_info->balance_ctl);
3167
3168 spin_lock(&fs_info->balance_lock);
3169 fs_info->balance_ctl = bctl;
3170 spin_unlock(&fs_info->balance_lock);
3171 }
3172
3173 static void unset_balance_control(struct btrfs_fs_info *fs_info)
3174 {
3175 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3176
3177 BUG_ON(!fs_info->balance_ctl);
3178
3179 spin_lock(&fs_info->balance_lock);
3180 fs_info->balance_ctl = NULL;
3181 spin_unlock(&fs_info->balance_lock);
3182
3183 kfree(bctl);
3184 }
3185
3186 /*
3187 * Balance filters. Return 1 if chunk should be filtered out
3188 * (should not be balanced).
3189 */
3190 static int chunk_profiles_filter(u64 chunk_type,
3191 struct btrfs_balance_args *bargs)
3192 {
3193 chunk_type = chunk_to_extended(chunk_type) &
3194 BTRFS_EXTENDED_PROFILE_MASK;
3195
3196 if (bargs->profiles & chunk_type)
3197 return 0;
3198
3199 return 1;
3200 }
3201
3202 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3203 struct btrfs_balance_args *bargs)
3204 {
3205 struct btrfs_block_group_cache *cache;
3206 u64 chunk_used;
3207 u64 user_thresh_min;
3208 u64 user_thresh_max;
3209 int ret = 1;
3210
3211 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3212 chunk_used = btrfs_block_group_used(&cache->item);
3213
3214 if (bargs->usage_min == 0)
3215 user_thresh_min = 0;
3216 else
3217 user_thresh_min = div_factor_fine(cache->key.offset,
3218 bargs->usage_min);
3219
3220 if (bargs->usage_max == 0)
3221 user_thresh_max = 1;
3222 else if (bargs->usage_max > 100)
3223 user_thresh_max = cache->key.offset;
3224 else
3225 user_thresh_max = div_factor_fine(cache->key.offset,
3226 bargs->usage_max);
3227
3228 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3229 ret = 0;
3230
3231 btrfs_put_block_group(cache);
3232 return ret;
3233 }
3234
3235 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3236 u64 chunk_offset, struct btrfs_balance_args *bargs)
3237 {
3238 struct btrfs_block_group_cache *cache;
3239 u64 chunk_used, user_thresh;
3240 int ret = 1;
3241
3242 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3243 chunk_used = btrfs_block_group_used(&cache->item);
3244
3245 if (bargs->usage_min == 0)
3246 user_thresh = 1;
3247 else if (bargs->usage > 100)
3248 user_thresh = cache->key.offset;
3249 else
3250 user_thresh = div_factor_fine(cache->key.offset,
3251 bargs->usage);
3252
3253 if (chunk_used < user_thresh)
3254 ret = 0;
3255
3256 btrfs_put_block_group(cache);
3257 return ret;
3258 }
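/*
 * Worked example (illustrative numbers) for the filter above: with a
 * 1GiB chunk and bargs->usage == 10, user_thresh is
 * div_factor_fine(1GiB, 10), about 102MiB; the chunk passes the filter
 * (return 0) and stays eligible for balancing only if it has fewer
 * used bytes than that.
 */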
3259
3260 static int chunk_devid_filter(struct extent_buffer *leaf,
3261 struct btrfs_chunk *chunk,
3262 struct btrfs_balance_args *bargs)
3263 {
3264 struct btrfs_stripe *stripe;
3265 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3266 int i;
3267
3268 for (i = 0; i < num_stripes; i++) {
3269 stripe = btrfs_stripe_nr(chunk, i);
3270 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3271 return 0;
3272 }
3273
3274 return 1;
3275 }
3276
3277 /* [pstart, pend) */
3278 static int chunk_drange_filter(struct extent_buffer *leaf,
3279 struct btrfs_chunk *chunk,
3280 u64 chunk_offset,
3281 struct btrfs_balance_args *bargs)
3282 {
3283 struct btrfs_stripe *stripe;
3284 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3285 u64 stripe_offset;
3286 u64 stripe_length;
3287 int factor;
3288 int i;
3289
3290 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3291 return 0;
3292
3293 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
3294 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
3295 factor = num_stripes / 2;
3296 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
3297 factor = num_stripes - 1;
3298 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
3299 factor = num_stripes - 2;
3300 } else {
3301 factor = num_stripes;
3302 }
3303
3304 for (i = 0; i < num_stripes; i++) {
3305 stripe = btrfs_stripe_nr(chunk, i);
3306 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3307 continue;
3308
3309 stripe_offset = btrfs_stripe_offset(leaf, stripe);
3310 stripe_length = btrfs_chunk_length(leaf, chunk);
3311 stripe_length = div_u64(stripe_length, factor);
3312
3313 if (stripe_offset < bargs->pend &&
3314 stripe_offset + stripe_length > bargs->pstart)
3315 return 0;
3316 }
3317
3318 return 1;
3319 }
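/*
 * Worked example (illustrative) for the factor logic above: a RAID10
 * chunk of length 2GiB with num_stripes == 4 gets factor 2, so each
 * stripe covers 2GiB / 2 = 1GiB on its device. The chunk is kept
 * (return 0) if any of its stripes on bargs->devid overlaps
 * [pstart, pend).
 */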
3320
3321 /* [vstart, vend) */
3322 static int chunk_vrange_filter(struct extent_buffer *leaf,
3323 struct btrfs_chunk *chunk,
3324 u64 chunk_offset,
3325 struct btrfs_balance_args *bargs)
3326 {
3327 if (chunk_offset < bargs->vend &&
3328 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3329 /* at least part of the chunk is inside this vrange */
3330 return 0;
3331
3332 return 1;
3333 }
3334
3335 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3336 struct btrfs_chunk *chunk,
3337 struct btrfs_balance_args *bargs)
3338 {
3339 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3340
3341 if (bargs->stripes_min <= num_stripes
3342 && num_stripes <= bargs->stripes_max)
3343 return 0;
3344
3345 return 1;
3346 }
3347
3348 static int chunk_soft_convert_filter(u64 chunk_type,
3349 struct btrfs_balance_args *bargs)
3350 {
3351 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3352 return 0;
3353
3354 chunk_type = chunk_to_extended(chunk_type) &
3355 BTRFS_EXTENDED_PROFILE_MASK;
3356
3357 if (bargs->target == chunk_type)
3358 return 1;
3359
3360 return 0;
3361 }
3362
3363 static int should_balance_chunk(struct btrfs_root *root,
3364 struct extent_buffer *leaf,
3365 struct btrfs_chunk *chunk, u64 chunk_offset)
3366 {
3367 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
3368 struct btrfs_balance_args *bargs = NULL;
3369 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3370
3371 /* type filter */
3372 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3373 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3374 return 0;
3375 }
3376
3377 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3378 bargs = &bctl->data;
3379 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3380 bargs = &bctl->sys;
3381 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3382 bargs = &bctl->meta;
3383
3384 /* profiles filter */
3385 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3386 chunk_profiles_filter(chunk_type, bargs)) {
3387 return 0;
3388 }
3389
3390 /* usage filter */
3391 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3392 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
3393 return 0;
3394 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3395 chunk_usage_range_filter(bctl->fs_info, chunk_offset, bargs)) {
3396 return 0;
3397 }
3398
3399 /* devid filter */
3400 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3401 chunk_devid_filter(leaf, chunk, bargs)) {
3402 return 0;
3403 }
3404
3405 /* drange filter, makes sense only with devid filter */
3406 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3407 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
3408 return 0;
3409 }
3410
3411 /* vrange filter */
3412 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3413 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3414 return 0;
3415 }
3416
3417 /* stripes filter */
3418 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3419 chunk_stripes_range_filter(leaf, chunk, bargs)) {
3420 return 0;
3421 }
3422
3423 /* soft profile changing mode */
3424 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3425 chunk_soft_convert_filter(chunk_type, bargs)) {
3426 return 0;
3427 }
3428
3429 /*
3430 * limited by count, must be the last filter
3431 */
3432 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3433 if (bargs->limit == 0)
3434 return 0;
3435 else
3436 bargs->limit--;
3437 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3438 /*
3439 * Same logic as the 'limit' filter; the minimum cannot be
3440 * determined here because we do not have the global information
3441 * about the count of all chunks that satisfy the filters.
3442 */
3443 if (bargs->limit_max == 0)
3444 return 0;
3445 else
3446 bargs->limit_max--;
3447 }
3448
3449 return 1;
3450 }
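/*
 * Note on ordering in should_balance_chunk() above: the filters
 * short-circuit in sequence - type, profiles, usage/usage range,
 * devid, drange, vrange, stripes range, soft convert - and the limit
 * counters run last, so they only count chunks that passed everything
 * else.
 */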
3451
3452 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3453 {
3454 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3455 struct btrfs_root *chunk_root = fs_info->chunk_root;
3456 struct btrfs_root *dev_root = fs_info->dev_root;
3457 struct list_head *devices;
3458 struct btrfs_device *device;
3459 u64 old_size;
3460 u64 size_to_free;
3461 u64 chunk_type;
3462 struct btrfs_chunk *chunk;
3463 struct btrfs_path *path = NULL;
3464 struct btrfs_key key;
3465 struct btrfs_key found_key;
3466 struct btrfs_trans_handle *trans;
3467 struct extent_buffer *leaf;
3468 int slot;
3469 int ret;
3470 int enospc_errors = 0;
3471 bool counting = true;
3472 /* The single value limit and the min/max limits use the same bytes in btrfs_balance_args (a union); save the limits here */
3473 u64 limit_data = bctl->data.limit;
3474 u64 limit_meta = bctl->meta.limit;
3475 u64 limit_sys = bctl->sys.limit;
3476 u32 count_data = 0;
3477 u32 count_meta = 0;
3478 u32 count_sys = 0;
3479 int chunk_reserved = 0;
3480 u64 bytes_used = 0;
3481
3482 /* step one, make some room on all the devices */
3483 devices = &fs_info->fs_devices->devices;
3484 list_for_each_entry(device, devices, dev_list) {
3485 old_size = btrfs_device_get_total_bytes(device);
3486 size_to_free = div_factor(old_size, 1);
3487 size_to_free = min_t(u64, size_to_free, SZ_1M);
3488 if (!device->writeable ||
3489 btrfs_device_get_total_bytes(device) -
3490 btrfs_device_get_bytes_used(device) > size_to_free ||
3491 device->is_tgtdev_for_dev_replace)
3492 continue;
3493
3494 ret = btrfs_shrink_device(device, old_size - size_to_free);
3495 if (ret == -ENOSPC)
3496 break;
3497 if (ret) {
3498 /* btrfs_shrink_device never returns ret > 0 */
3499 WARN_ON(ret > 0);
3500 goto error;
3501 }
3502
3503 trans = btrfs_start_transaction(dev_root, 0);
3504 if (IS_ERR(trans)) {
3505 ret = PTR_ERR(trans);
3506 btrfs_info_in_rcu(fs_info,
3507 "resize: unable to start transaction after shrinking device %s (error %d), old size %llu, new size %llu",
3508 rcu_str_deref(device->name), ret,
3509 old_size, old_size - size_to_free);
3510 goto error;
3511 }
3512
3513 ret = btrfs_grow_device(trans, device, old_size);
3514 if (ret) {
3515 btrfs_end_transaction(trans, dev_root);
3516 /* btrfs_grow_device never returns ret > 0 */
3517 WARN_ON(ret > 0);
3518 btrfs_info_in_rcu(fs_info,
3519 "resize: unable to grow device after shrinking device %s (error %d), old size %llu, new size %llu",
3520 rcu_str_deref(device->name), ret,
3521 old_size, old_size - size_to_free);
3522 goto error;
3523 }
3524
3525 btrfs_end_transaction(trans, dev_root);
3526 }
3527
3528 /* step two, relocate all the chunks */
3529 path = btrfs_alloc_path();
3530 if (!path) {
3531 ret = -ENOMEM;
3532 goto error;
3533 }
3534
3535 /* zero out stat counters */
3536 spin_lock(&fs_info->balance_lock);
3537 memset(&bctl->stat, 0, sizeof(bctl->stat));
3538 spin_unlock(&fs_info->balance_lock);
3539 again:
3540 if (!counting) {
3541 /*
3542 * The single value limit and the min/max limits use the same
3543 * bytes in btrfs_balance_args (a union); restore the saved limits.
3544 */
3545 bctl->data.limit = limit_data;
3546 bctl->meta.limit = limit_meta;
3547 bctl->sys.limit = limit_sys;
3548 }
3549 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3550 key.offset = (u64)-1;
3551 key.type = BTRFS_CHUNK_ITEM_KEY;
3552
3553 while (1) {
3554 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3555 atomic_read(&fs_info->balance_cancel_req)) {
3556 ret = -ECANCELED;
3557 goto error;
3558 }
3559
3560 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3561 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3562 if (ret < 0) {
3563 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3564 goto error;
3565 }
3566
3567 /*
3568 * This shouldn't happen; it means the last relocation
3569 * failed.
3570 */
3571 if (ret == 0)
3572 BUG(); /* FIXME break ? */
3573
3574 ret = btrfs_previous_item(chunk_root, path, 0,
3575 BTRFS_CHUNK_ITEM_KEY);
3576 if (ret) {
3577 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3578 ret = 0;
3579 break;
3580 }
3581
3582 leaf = path->nodes[0];
3583 slot = path->slots[0];
3584 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3585
3586 if (found_key.objectid != key.objectid) {
3587 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3588 break;
3589 }
3590
3591 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3592 chunk_type = btrfs_chunk_type(leaf, chunk);
3593
3594 if (!counting) {
3595 spin_lock(&fs_info->balance_lock);
3596 bctl->stat.considered++;
3597 spin_unlock(&fs_info->balance_lock);
3598 }
3599
3600 ret = should_balance_chunk(chunk_root, leaf, chunk,
3601 found_key.offset);
3602
3603 btrfs_release_path(path);
3604 if (!ret) {
3605 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3606 goto loop;
3607 }
3608
3609 if (counting) {
3610 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3611 spin_lock(&fs_info->balance_lock);
3612 bctl->stat.expected++;
3613 spin_unlock(&fs_info->balance_lock);
3614
3615 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3616 count_data++;
3617 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3618 count_sys++;
3619 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3620 count_meta++;
3621
3622 goto loop;
3623 }
3624
3625 /*
3626 * Apply the limit_min filter. No need to check whether the
3627 * LIMITS filter is in use, since limit_min is 0 by default.
3628 */
3629 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3630 count_data < bctl->data.limit_min)
3631 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3632 count_meta < bctl->meta.limit_min)
3633 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3634 count_sys < bctl->sys.limit_min)) {
3635 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3636 goto loop;
3637 }
3638
3639 ASSERT(fs_info->data_sinfo);
3640 spin_lock(&fs_info->data_sinfo->lock);
3641 bytes_used = fs_info->data_sinfo->bytes_used;
3642 spin_unlock(&fs_info->data_sinfo->lock);
3643
3644 if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3645 !chunk_reserved && !bytes_used) {
3646 trans = btrfs_start_transaction(chunk_root, 0);
3647 if (IS_ERR(trans)) {
3648 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3649 ret = PTR_ERR(trans);
3650 goto error;
3651 }
3652
3653 ret = btrfs_force_chunk_alloc(trans, chunk_root,
3654 BTRFS_BLOCK_GROUP_DATA);
3655 btrfs_end_transaction(trans, chunk_root);
3656 if (ret < 0) {
3657 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3658 goto error;
3659 }
3660 chunk_reserved = 1;
3661 }
3662
3663 ret = btrfs_relocate_chunk(chunk_root,
3664 found_key.offset);
3665 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3666 if (ret && ret != -ENOSPC)
3667 goto error;
3668 if (ret == -ENOSPC) {
3669 enospc_errors++;
3670 } else {
3671 spin_lock(&fs_info->balance_lock);
3672 bctl->stat.completed++;
3673 spin_unlock(&fs_info->balance_lock);
3674 }
3675 loop:
3676 if (found_key.offset == 0)
3677 break;
3678 key.offset = found_key.offset - 1;
3679 }
3680
3681 if (counting) {
3682 btrfs_release_path(path);
3683 counting = false;
3684 goto again;
3685 }
3686 error:
3687 btrfs_free_path(path);
3688 if (enospc_errors) {
3689 btrfs_info(fs_info, "%d enospc errors during balance",
3690 enospc_errors);
3691 if (!ret)
3692 ret = -ENOSPC;
3693 }
3694
3695 return ret;
3696 }
3697
3698 /**
3699 * alloc_profile_is_valid - see if a given profile is valid and reduced
3700 * @flags: profile to validate
3701 * @extended: if true @flags is treated as an extended profile
3702 */
3703 static int alloc_profile_is_valid(u64 flags, int extended)
3704 {
3705 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3706 BTRFS_BLOCK_GROUP_PROFILE_MASK);
3707
3708 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3709
3710 /* 1) check that all other bits are zeroed */
3711 if (flags & ~mask)
3712 return 0;
3713
3714 /* 2) see if profile is reduced */
3715 if (flags == 0)
3716 return !extended; /* "0" is valid for usual profiles */
3717
3718 /* true if exactly one bit set */
3719 return (flags & (flags - 1)) == 0;
3720 }
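/*
 * Example (illustrative): after masking out the type bits,
 * BTRFS_BLOCK_GROUP_RAID1 leaves exactly one profile bit set, so
 * (flags & (flags - 1)) == 0 and the profile is accepted, while
 * RAID1 | RAID10 leaves two bits set and is rejected as not reduced.
 */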
3721
3722 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3723 {
3724 /* cancel requested || normal exit path */
3725 return atomic_read(&fs_info->balance_cancel_req) ||
3726 (atomic_read(&fs_info->balance_pause_req) == 0 &&
3727 atomic_read(&fs_info->balance_cancel_req) == 0);
3728 }
3729
3730 static void __cancel_balance(struct btrfs_fs_info *fs_info)
3731 {
3732 int ret;
3733
3734 unset_balance_control(fs_info);
3735 ret = del_balance_item(fs_info->tree_root);
3736 if (ret)
3737 btrfs_handle_fs_error(fs_info, ret, NULL);
3738
3739 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3740 }
3741
3742 /* Non-zero return value signifies invalidity */
3743 static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
3744 u64 allowed)
3745 {
3746 return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3747 (!alloc_profile_is_valid(bctl_arg->target, 1) ||
3748 (bctl_arg->target & ~allowed)));
3749 }
3750
3751 /*
3752 * Should be called with both balance and volume mutexes held
3753 */
3754 int btrfs_balance(struct btrfs_balance_control *bctl,
3755 struct btrfs_ioctl_balance_args *bargs)
3756 {
3757 struct btrfs_fs_info *fs_info = bctl->fs_info;
3758 u64 allowed;
3759 int mixed = 0;
3760 int ret;
3761 u64 num_devices;
3762 unsigned seq;
3763
3764 if (btrfs_fs_closing(fs_info) ||
3765 atomic_read(&fs_info->balance_pause_req) ||
3766 atomic_read(&fs_info->balance_cancel_req)) {
3767 ret = -EINVAL;
3768 goto out;
3769 }
3770
3771 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3772 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3773 mixed = 1;
3774
3775 /*
3776 * With mixed block groups, both data and metadata must be balanced,
3777 * and identical options must be given for both of them.
3778 */
3779 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3780 if (mixed && (bctl->flags & allowed)) {
3781 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3782 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3783 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3784 btrfs_err(fs_info, "with mixed groups data and "
3785 "metadata balance options must be the same");
3786 ret = -EINVAL;
3787 goto out;
3788 }
3789 }
3790
3791 num_devices = fs_info->fs_devices->num_devices;
3792 btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
3793 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3794 BUG_ON(num_devices < 1);
3795 num_devices--;
3796 }
3797 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
3798 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP;
3799 if (num_devices > 1)
3800 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3801 if (num_devices > 2)
3802 allowed |= BTRFS_BLOCK_GROUP_RAID5;
3803 if (num_devices > 3)
3804 allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3805 BTRFS_BLOCK_GROUP_RAID6);
3806 if (validate_convert_profile(&bctl->data, allowed)) {
3807 btrfs_err(fs_info, "unable to start balance with target "
3808 "data profile %llu",
3809 bctl->data.target);
3810 ret = -EINVAL;
3811 goto out;
3812 }
3813 if (validate_convert_profile(&bctl->meta, allowed)) {
3814 btrfs_err(fs_info,
3815 "unable to start balance with target metadata profile %llu",
3816 bctl->meta.target);
3817 ret = -EINVAL;
3818 goto out;
3819 }
3820 if (validate_convert_profile(&bctl->sys, allowed)) {
3821 btrfs_err(fs_info,
3822 "unable to start balance with target system profile %llu",
3823 bctl->sys.target);
3824 ret = -EINVAL;
3825 goto out;
3826 }
3827
3828 /* allow reducing metadata or system integrity only if force is set */
3829 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3830 BTRFS_BLOCK_GROUP_RAID10 |
3831 BTRFS_BLOCK_GROUP_RAID5 |
3832 BTRFS_BLOCK_GROUP_RAID6;
3833 do {
3834 seq = read_seqbegin(&fs_info->profiles_lock);
3835
3836 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3837 (fs_info->avail_system_alloc_bits & allowed) &&
3838 !(bctl->sys.target & allowed)) ||
3839 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3840 (fs_info->avail_metadata_alloc_bits & allowed) &&
3841 !(bctl->meta.target & allowed))) {
3842 if (bctl->flags & BTRFS_BALANCE_FORCE) {
3843 btrfs_info(fs_info, "force reducing metadata integrity");
3844 } else {
3845 btrfs_err(fs_info, "balance will reduce metadata "
3846 "integrity, use force if you want this");
3847 ret = -EINVAL;
3848 goto out;
3849 }
3850 }
3851 } while (read_seqretry(&fs_info->profiles_lock, seq));
3852
3853 if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) <
3854 btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) {
3855 btrfs_warn(fs_info,
3856 "metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
3857 bctl->meta.target, bctl->data.target);
3858 }
3859
3860 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3861 fs_info->num_tolerated_disk_barrier_failures = min(
3862 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info),
3863 btrfs_get_num_tolerated_disk_barrier_failures(
3864 bctl->sys.target));
3865 }
3866
3867 ret = insert_balance_item(fs_info->tree_root, bctl);
3868 if (ret && ret != -EEXIST)
3869 goto out;
3870
3871 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3872 BUG_ON(ret == -EEXIST);
3873 set_balance_control(bctl);
3874 } else {
3875 BUG_ON(ret != -EEXIST);
3876 spin_lock(&fs_info->balance_lock);
3877 update_balance_args(bctl);
3878 spin_unlock(&fs_info->balance_lock);
3879 }
3880
3881 atomic_inc(&fs_info->balance_running);
3882 mutex_unlock(&fs_info->balance_mutex);
3883
3884 ret = __btrfs_balance(fs_info);
3885
3886 mutex_lock(&fs_info->balance_mutex);
3887 atomic_dec(&fs_info->balance_running);
3888
3889 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3890 fs_info->num_tolerated_disk_barrier_failures =
3891 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3892 }
3893
3894 if (bargs) {
3895 memset(bargs, 0, sizeof(*bargs));
3896 update_ioctl_balance_args(fs_info, 0, bargs);
3897 }
3898
3899 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3900 balance_need_close(fs_info)) {
3901 __cancel_balance(fs_info);
3902 }
3903
3904 wake_up(&fs_info->balance_wait_q);
3905
3906 return ret;
3907 out:
3908 if (bctl->flags & BTRFS_BALANCE_RESUME)
3909 __cancel_balance(fs_info);
3910 else {
3911 kfree(bctl);
3912 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3913 }
3914 return ret;
3915 }
3916
3917 static int balance_kthread(void *data)
3918 {
3919 struct btrfs_fs_info *fs_info = data;
3920 int ret = 0;
3921
3922 mutex_lock(&fs_info->volume_mutex);
3923 mutex_lock(&fs_info->balance_mutex);
3924
3925 if (fs_info->balance_ctl) {
3926 btrfs_info(fs_info, "continuing balance");
3927 ret = btrfs_balance(fs_info->balance_ctl, NULL);
3928 }
3929
3930 mutex_unlock(&fs_info->balance_mutex);
3931 mutex_unlock(&fs_info->volume_mutex);
3932
3933 return ret;
3934 }
3935
3936 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3937 {
3938 struct task_struct *tsk;
3939
3940 spin_lock(&fs_info->balance_lock);
3941 if (!fs_info->balance_ctl) {
3942 spin_unlock(&fs_info->balance_lock);
3943 return 0;
3944 }
3945 spin_unlock(&fs_info->balance_lock);
3946
3947 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
3948 btrfs_info(fs_info, "force skipping balance");
3949 return 0;
3950 }
3951
3952 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3953 return PTR_ERR_OR_ZERO(tsk);
3954 }
3955
3956 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3957 {
3958 struct btrfs_balance_control *bctl;
3959 struct btrfs_balance_item *item;
3960 struct btrfs_disk_balance_args disk_bargs;
3961 struct btrfs_path *path;
3962 struct extent_buffer *leaf;
3963 struct btrfs_key key;
3964 int ret;
3965
3966 path = btrfs_alloc_path();
3967 if (!path)
3968 return -ENOMEM;
3969
3970 key.objectid = BTRFS_BALANCE_OBJECTID;
3971 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3972 key.offset = 0;
3973
3974 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3975 if (ret < 0)
3976 goto out;
3977 if (ret > 0) { /* ret = -ENOENT; */
3978 ret = 0;
3979 goto out;
3980 }
3981
3982 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3983 if (!bctl) {
3984 ret = -ENOMEM;
3985 goto out;
3986 }
3987
3988 leaf = path->nodes[0];
3989 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3990
3991 bctl->fs_info = fs_info;
3992 bctl->flags = btrfs_balance_flags(leaf, item);
3993 bctl->flags |= BTRFS_BALANCE_RESUME;
3994
3995 btrfs_balance_data(leaf, item, &disk_bargs);
3996 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3997 btrfs_balance_meta(leaf, item, &disk_bargs);
3998 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3999 btrfs_balance_sys(leaf, item, &disk_bargs);
4000 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4001
4002 WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
4003
4004 mutex_lock(&fs_info->volume_mutex);
4005 mutex_lock(&fs_info->balance_mutex);
4006
4007 set_balance_control(bctl);
4008
4009 mutex_unlock(&fs_info->balance_mutex);
4010 mutex_unlock(&fs_info->volume_mutex);
4011 out:
4012 btrfs_free_path(path);
4013 return ret;
4014 }
4015
4016 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4017 {
4018 int ret = 0;
4019
4020 mutex_lock(&fs_info->balance_mutex);
4021 if (!fs_info->balance_ctl) {
4022 mutex_unlock(&fs_info->balance_mutex);
4023 return -ENOTCONN;
4024 }
4025
4026 if (atomic_read(&fs_info->balance_running)) {
4027 atomic_inc(&fs_info->balance_pause_req);
4028 mutex_unlock(&fs_info->balance_mutex);
4029
4030 wait_event(fs_info->balance_wait_q,
4031 atomic_read(&fs_info->balance_running) == 0);
4032
4033 mutex_lock(&fs_info->balance_mutex);
4034 /* we are good with balance_ctl ripped off from under us */
4035 BUG_ON(atomic_read(&fs_info->balance_running));
4036 atomic_dec(&fs_info->balance_pause_req);
4037 } else {
4038 ret = -ENOTCONN;
4039 }
4040
4041 mutex_unlock(&fs_info->balance_mutex);
4042 return ret;
4043 }
4044
4045 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4046 {
4047 if (fs_info->sb->s_flags & MS_RDONLY)
4048 return -EROFS;
4049
4050 mutex_lock(&fs_info->balance_mutex);
4051 if (!fs_info->balance_ctl) {
4052 mutex_unlock(&fs_info->balance_mutex);
4053 return -ENOTCONN;
4054 }
4055
4056 atomic_inc(&fs_info->balance_cancel_req);
4057 /*
4058 * if a balance is running, just wait and return; the balance item
4059 * is deleted in btrfs_balance() in this case
4060 */
4061 if (atomic_read(&fs_info->balance_running)) {
4062 mutex_unlock(&fs_info->balance_mutex);
4063 wait_event(fs_info->balance_wait_q,
4064 atomic_read(&fs_info->balance_running) == 0);
4065 mutex_lock(&fs_info->balance_mutex);
4066 } else {
4067 /* __cancel_balance needs volume_mutex */
4068 mutex_unlock(&fs_info->balance_mutex);
4069 mutex_lock(&fs_info->volume_mutex);
4070 mutex_lock(&fs_info->balance_mutex);
4071
4072 if (fs_info->balance_ctl)
4073 __cancel_balance(fs_info);
4074
4075 mutex_unlock(&fs_info->volume_mutex);
4076 }
4077
4078 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
4079 atomic_dec(&fs_info->balance_cancel_req);
4080 mutex_unlock(&fs_info->balance_mutex);
4081 return 0;
4082 }
4083
4084 static int btrfs_uuid_scan_kthread(void *data)
4085 {
4086 struct btrfs_fs_info *fs_info = data;
4087 struct btrfs_root *root = fs_info->tree_root;
4088 struct btrfs_key key;
4089 struct btrfs_key max_key;
4090 struct btrfs_path *path = NULL;
4091 int ret = 0;
4092 struct extent_buffer *eb;
4093 int slot;
4094 struct btrfs_root_item root_item;
4095 u32 item_size;
4096 struct btrfs_trans_handle *trans = NULL;
4097
4098 path = btrfs_alloc_path();
4099 if (!path) {
4100 ret = -ENOMEM;
4101 goto out;
4102 }
4103
4104 key.objectid = 0;
4105 key.type = BTRFS_ROOT_ITEM_KEY;
4106 key.offset = 0;
4107
4108 max_key.objectid = (u64)-1;
4109 max_key.type = BTRFS_ROOT_ITEM_KEY;
4110 max_key.offset = (u64)-1;
4111
4112 while (1) {
4113 ret = btrfs_search_forward(root, &key, path, 0);
4114 if (ret) {
4115 if (ret > 0)
4116 ret = 0;
4117 break;
4118 }
4119
4120 if (key.type != BTRFS_ROOT_ITEM_KEY ||
4121 (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4122 key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4123 key.objectid > BTRFS_LAST_FREE_OBJECTID)
4124 goto skip;
4125
4126 eb = path->nodes[0];
4127 slot = path->slots[0];
4128 item_size = btrfs_item_size_nr(eb, slot);
4129 if (item_size < sizeof(root_item))
4130 goto skip;
4131
4132 read_extent_buffer(eb, &root_item,
4133 btrfs_item_ptr_offset(eb, slot),
4134 (int)sizeof(root_item));
4135 if (btrfs_root_refs(&root_item) == 0)
4136 goto skip;
4137
4138 if (!btrfs_is_empty_uuid(root_item.uuid) ||
4139 !btrfs_is_empty_uuid(root_item.received_uuid)) {
4140 if (trans)
4141 goto update_tree;
4142
4143 btrfs_release_path(path);
4144 /*
4145 * 1 - subvol uuid item
4146 * 1 - received_subvol uuid item
4147 */
4148 trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4149 if (IS_ERR(trans)) {
4150 ret = PTR_ERR(trans);
4151 break;
4152 }
4153 continue;
4154 } else {
4155 goto skip;
4156 }
4157 update_tree:
4158 if (!btrfs_is_empty_uuid(root_item.uuid)) {
4159 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
4160 root_item.uuid,
4161 BTRFS_UUID_KEY_SUBVOL,
4162 key.objectid);
4163 if (ret < 0) {
4164 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4165 ret);
4166 break;
4167 }
4168 }
4169
4170 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4171 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
4172 root_item.received_uuid,
4173 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4174 key.objectid);
4175 if (ret < 0) {
4176 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4177 ret);
4178 break;
4179 }
4180 }
4181
4182 skip:
4183 if (trans) {
4184 ret = btrfs_end_transaction(trans, fs_info->uuid_root);
4185 trans = NULL;
4186 if (ret)
4187 break;
4188 }
4189
4190 btrfs_release_path(path);
4191 if (key.offset < (u64)-1) {
4192 key.offset++;
4193 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4194 key.offset = 0;
4195 key.type = BTRFS_ROOT_ITEM_KEY;
4196 } else if (key.objectid < (u64)-1) {
4197 key.offset = 0;
4198 key.type = BTRFS_ROOT_ITEM_KEY;
4199 key.objectid++;
4200 } else {
4201 break;
4202 }
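/*
 * The ladder above advances to the next candidate key: bump offset
 * first and, when it wraps, move on to the next objectid. The type is
 * pinned back to BTRFS_ROOT_ITEM_KEY since those are the only items
 * this scan cares about, so btrfs_search_forward() resumes just past
 * the last key processed.
 */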
4203 cond_resched();
4204 }
4205
4206 out:
4207 btrfs_free_path(path);
4208 if (trans && !IS_ERR(trans))
4209 btrfs_end_transaction(trans, fs_info->uuid_root);
4210 if (ret)
4211 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4212 else
4213 fs_info->update_uuid_tree_gen = 1;
4214 up(&fs_info->uuid_tree_rescan_sem);
4215 return 0;
4216 }
4217
4218 /*
4219 * Callback for btrfs_uuid_tree_iterate().
4220 * returns:
4221 * 0 check succeeded, the entry is not outdated.
4222 * < 0 if an error occurred.
4223 * > 0 if the check failed, which means the caller shall remove the entry.
4224 */
4225 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
4226 u8 *uuid, u8 type, u64 subid)
4227 {
4228 struct btrfs_key key;
4229 int ret = 0;
4230 struct btrfs_root *subvol_root;
4231
4232 if (type != BTRFS_UUID_KEY_SUBVOL &&
4233 type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
4234 goto out;
4235
4236 key.objectid = subid;
4237 key.type = BTRFS_ROOT_ITEM_KEY;
4238 key.offset = (u64)-1;
4239 subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
4240 if (IS_ERR(subvol_root)) {
4241 ret = PTR_ERR(subvol_root);
4242 if (ret == -ENOENT)
4243 ret = 1;
4244 goto out;
4245 }
4246
4247 switch (type) {
4248 case BTRFS_UUID_KEY_SUBVOL:
4249 if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
4250 ret = 1;
4251 break;
4252 case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
4253 if (memcmp(uuid, subvol_root->root_item.received_uuid,
4254 BTRFS_UUID_SIZE))
4255 ret = 1;
4256 break;
4257 }
4258
4259 out:
4260 return ret;
4261 }
4262
4263 static int btrfs_uuid_rescan_kthread(void *data)
4264 {
4265 struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
4266 int ret;
4267
4268 /*
4269 * 1st step is to iterate through the existing UUID tree and
4270 * to delete all entries that contain outdated data.
4271 * 2nd step is to add all missing entries to the UUID tree.
4272 */
4273 ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
4274 if (ret < 0) {
4275 btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
4276 up(&fs_info->uuid_tree_rescan_sem);
4277 return ret;
4278 }
4279 return btrfs_uuid_scan_kthread(data);
4280 }
4281
4282 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4283 {
4284 struct btrfs_trans_handle *trans;
4285 struct btrfs_root *tree_root = fs_info->tree_root;
4286 struct btrfs_root *uuid_root;
4287 struct task_struct *task;
4288 int ret;
4289
4290 /*
4291 * 1 - root node
4292 * 1 - root item
4293 */
4294 trans = btrfs_start_transaction(tree_root, 2);
4295 if (IS_ERR(trans))
4296 return PTR_ERR(trans);
4297
4298 uuid_root = btrfs_create_tree(trans, fs_info,
4299 BTRFS_UUID_TREE_OBJECTID);
4300 if (IS_ERR(uuid_root)) {
4301 ret = PTR_ERR(uuid_root);
4302 btrfs_abort_transaction(trans, ret);
4303 btrfs_end_transaction(trans, tree_root);
4304 return ret;
4305 }
4306
4307 fs_info->uuid_root = uuid_root;
4308
4309 ret = btrfs_commit_transaction(trans, tree_root);
4310 if (ret)
4311 return ret;
4312
4313 down(&fs_info->uuid_tree_rescan_sem);
4314 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4315 if (IS_ERR(task)) {
4316 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4317 btrfs_warn(fs_info, "failed to start uuid_scan task");
4318 up(&fs_info->uuid_tree_rescan_sem);
4319 return PTR_ERR(task);
4320 }
4321
4322 return 0;
4323 }
4324
4325 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
4326 {
4327 struct task_struct *task;
4328
4329 down(&fs_info->uuid_tree_rescan_sem);
4330 task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
4331 if (IS_ERR(task)) {
4332 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4333 btrfs_warn(fs_info, "failed to start uuid_rescan task");
4334 up(&fs_info->uuid_tree_rescan_sem);
4335 return PTR_ERR(task);
4336 }
4337
4338 return 0;
4339 }
4340
4341 /*
4342 * shrinking a device means finding all of the device extents past
4343 * the new size, and then following the back refs to the chunks.
4344 * The chunk relocation code actually frees the device extents.
4345 */
4346 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4347 {
4348 struct btrfs_trans_handle *trans;
4349 struct btrfs_root *root = device->dev_root;
4350 struct btrfs_dev_extent *dev_extent = NULL;
4351 struct btrfs_path *path;
4352 u64 length;
4353 u64 chunk_offset;
4354 int ret;
4355 int slot;
4356 int failed = 0;
4357 bool retried = false;
4358 bool checked_pending_chunks = false;
4359 struct extent_buffer *l;
4360 struct btrfs_key key;
4361 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4362 u64 old_total = btrfs_super_total_bytes(super_copy);
4363 u64 old_size = btrfs_device_get_total_bytes(device);
4364 u64 diff = old_size - new_size;
4365
4366 if (device->is_tgtdev_for_dev_replace)
4367 return -EINVAL;
4368
4369 path = btrfs_alloc_path();
4370 if (!path)
4371 return -ENOMEM;
4372
4373 path->reada = READA_FORWARD;
4374
4375 lock_chunks(root);
4376
4377 btrfs_device_set_total_bytes(device, new_size);
4378 if (device->writeable) {
4379 device->fs_devices->total_rw_bytes -= diff;
4380 spin_lock(&root->fs_info->free_chunk_lock);
4381 root->fs_info->free_chunk_space -= diff;
4382 spin_unlock(&root->fs_info->free_chunk_lock);
4383 }
4384 unlock_chunks(root);
4385
4386 again:
4387 key.objectid = device->devid;
4388 key.offset = (u64)-1;
4389 key.type = BTRFS_DEV_EXTENT_KEY;
4390
4391 do {
4392 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
4393 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4394 if (ret < 0) {
4395 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4396 goto done;
4397 }
4398
4399 ret = btrfs_previous_item(root, path, 0, key.type);
4400 if (ret)
4401 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4402 if (ret < 0)
4403 goto done;
4404 if (ret) {
4405 ret = 0;
4406 btrfs_release_path(path);
4407 break;
4408 }
4409
4410 l = path->nodes[0];
4411 slot = path->slots[0];
4412 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4413
4414 if (key.objectid != device->devid) {
4415 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4416 btrfs_release_path(path);
4417 break;
4418 }
4419
4420 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4421 length = btrfs_dev_extent_length(l, dev_extent);
4422
4423 if (key.offset + length <= new_size) {
4424 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4425 btrfs_release_path(path);
4426 break;
4427 }
4428
4429 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4430 btrfs_release_path(path);
4431
4432 ret = btrfs_relocate_chunk(root, chunk_offset);
4433 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4434 if (ret && ret != -ENOSPC)
4435 goto done;
4436 if (ret == -ENOSPC)
4437 failed++;
4438 } while (key.offset-- > 0);
4439
4440 if (failed && !retried) {
4441 failed = 0;
4442 retried = true;
4443 goto again;
4444 } else if (failed && retried) {
4445 ret = -ENOSPC;
4446 goto done;
4447 }
4448
4449 /* Shrinking succeeded, else we would be at "done". */
4450 trans = btrfs_start_transaction(root, 0);
4451 if (IS_ERR(trans)) {
4452 ret = PTR_ERR(trans);
4453 goto done;
4454 }
4455
4456 lock_chunks(root);
4457
4458 /*
4459 * We checked in the above loop all device extents that were already in
4460 * the device tree. However before we have updated the device's
4461 * total_bytes to the new size, we might have had chunk allocations that
4462 * have not completed yet (new block groups attached to transaction
4463 * handles), and therefore their device extents were not yet in the
4464 * device tree and we missed them in the loop above. So if we have any
4465 * pending chunk using a device extent that overlaps the device range
4466 * that we can no longer use, commit the current transaction and
4467 * repeat the search on the device tree - this way we guarantee we will
4468 * not have chunks using device extents that end beyond 'new_size'.
4469 */
4470 if (!checked_pending_chunks) {
4471 u64 start = new_size;
4472 u64 len = old_size - new_size;
4473
4474 if (contains_pending_extent(trans->transaction, device,
4475 &start, len)) {
4476 unlock_chunks(root);
4477 checked_pending_chunks = true;
4478 failed = 0;
4479 retried = false;
4480 ret = btrfs_commit_transaction(trans, root);
4481 if (ret)
4482 goto done;
4483 goto again;
4484 }
4485 }
4486
4487 btrfs_device_set_disk_total_bytes(device, new_size);
4488 if (list_empty(&device->resized_list))
4489 list_add_tail(&device->resized_list,
4490 &root->fs_info->fs_devices->resized_devices);
4491
4492 WARN_ON(diff > old_total);
4493 btrfs_set_super_total_bytes(super_copy, old_total - diff);
4494 unlock_chunks(root);
4495
4496 /* Now btrfs_update_device() will change the on-disk size. */
4497 ret = btrfs_update_device(trans, device);
4498 btrfs_end_transaction(trans, root);
4499 done:
4500 btrfs_free_path(path);
4501 if (ret) {
4502 lock_chunks(root);
4503 btrfs_device_set_total_bytes(device, old_size);
4504 if (device->writeable)
4505 device->fs_devices->total_rw_bytes += diff;
4506 spin_lock(&root->fs_info->free_chunk_lock);
4507 root->fs_info->free_chunk_space += diff;
4508 spin_unlock(&root->fs_info->free_chunk_lock);
4509 unlock_chunks(root);
4510 }
4511 return ret;
4512 }
4513
4514 static int btrfs_add_system_chunk(struct btrfs_root *root,
4515 struct btrfs_key *key,
4516 struct btrfs_chunk *chunk, int item_size)
4517 {
4518 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4519 struct btrfs_disk_key disk_key;
4520 u32 array_size;
4521 u8 *ptr;
4522
4523 lock_chunks(root);
4524 array_size = btrfs_super_sys_array_size(super_copy);
4525 if (array_size + item_size + sizeof(disk_key)
4526 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4527 unlock_chunks(root);
4528 return -EFBIG;
4529 }
4530
4531 ptr = super_copy->sys_chunk_array + array_size;
4532 btrfs_cpu_key_to_disk(&disk_key, key);
4533 memcpy(ptr, &disk_key, sizeof(disk_key));
4534 ptr += sizeof(disk_key);
4535 memcpy(ptr, chunk, item_size);
4536 item_size += sizeof(disk_key);
4537 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4538 unlock_chunks(root);
4539
4540 return 0;
4541 }
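/*
 * The superblock's sys_chunk_array is therefore a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk including its stripes)
 * pairs, with btrfs_super_sys_array_size() tracking how much of the
 * fixed BTRFS_SYSTEM_CHUNK_ARRAY_SIZE buffer is in use.
 */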
4542
4543 /*
4544 * sort the devices in descending order by max_avail, total_avail
4545 */
4546 static int btrfs_cmp_device_info(const void *a, const void *b)
4547 {
4548 const struct btrfs_device_info *di_a = a;
4549 const struct btrfs_device_info *di_b = b;
4550
4551 if (di_a->max_avail > di_b->max_avail)
4552 return -1;
4553 if (di_a->max_avail < di_b->max_avail)
4554 return 1;
4555 if (di_a->total_avail > di_b->total_avail)
4556 return -1;
4557 if (di_a->total_avail < di_b->total_avail)
4558 return 1;
4559 return 0;
4560 }
4561
4562 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
4563 {
4564 /* TODO allow them to set a preferred stripe size */
4565 return SZ_64K;
4566 }
4567
4568 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4569 {
4570 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4571 return;
4572
4573 btrfs_set_fs_incompat(info, RAID56);
4574 }
4575
4576 #define BTRFS_MAX_DEVS(r) ((BTRFS_MAX_ITEM_SIZE(r) \
4577 - sizeof(struct btrfs_chunk)) \
4578 / sizeof(struct btrfs_stripe) + 1)
4579
4580 #define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE \
4581 - 2 * sizeof(struct btrfs_disk_key) \
4582 - 2 * sizeof(struct btrfs_chunk)) \
4583 / sizeof(struct btrfs_stripe) + 1)
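/*
 * Both limits express how many stripes fit in a single item: a chunk
 * item in a tree leaf for BTRFS_MAX_DEVS(), and the superblock's
 * sys_chunk_array (key + chunk pairs, see btrfs_add_system_chunk())
 * for BTRFS_MAX_DEVS_SYS_CHUNK. The "+ 1" accounts for the first
 * stripe embedded in struct btrfs_chunk itself. As a rough worked
 * example, assuming a 2048-byte system chunk array, a 17-byte disk
 * key, an 80-byte chunk (embedded stripe included) and 32-byte
 * stripes, BTRFS_MAX_DEVS_SYS_CHUNK is (2048 - 34 - 160) / 32 + 1 = 58.
 */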
4584
4585 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4586 struct btrfs_root *extent_root, u64 start,
4587 u64 type)
4588 {
4589 struct btrfs_fs_info *info = extent_root->fs_info;
4590 struct btrfs_fs_devices *fs_devices = info->fs_devices;
4591 struct list_head *cur;
4592 struct map_lookup *map = NULL;
4593 struct extent_map_tree *em_tree;
4594 struct extent_map *em;
4595 struct btrfs_device_info *devices_info = NULL;
4596 u64 total_avail;
4597 int num_stripes; /* total number of stripes to allocate */
4598 int data_stripes; /* number of stripes that count for
4599 block group size */
4600 int sub_stripes; /* sub_stripes info for map */
4601 int dev_stripes; /* stripes per dev */
4602 int devs_max; /* max devs to use */
4603 int devs_min; /* min devs needed */
4604 int devs_increment; /* ndevs has to be a multiple of this */
4605 int ncopies; /* how many copies of the data there are */
4606 int ret;
4607 u64 max_stripe_size;
4608 u64 max_chunk_size;
4609 u64 stripe_size;
4610 u64 num_bytes;
4611 u64 raid_stripe_len = BTRFS_STRIPE_LEN;
4612 int ndevs;
4613 int i;
4614 int j;
4615 int index;
4616
4617 BUG_ON(!alloc_profile_is_valid(type, 0));
4618
4619 if (list_empty(&fs_devices->alloc_list))
4620 return -ENOSPC;
4621
4622 index = __get_raid_index(type);
4623
4624 sub_stripes = btrfs_raid_array[index].sub_stripes;
4625 dev_stripes = btrfs_raid_array[index].dev_stripes;
4626 devs_max = btrfs_raid_array[index].devs_max;
4627 devs_min = btrfs_raid_array[index].devs_min;
4628 devs_increment = btrfs_raid_array[index].devs_increment;
4629 ncopies = btrfs_raid_array[index].ncopies;
4630
4631 if (type & BTRFS_BLOCK_GROUP_DATA) {
4632 max_stripe_size = SZ_1G;
4633 max_chunk_size = 10 * max_stripe_size;
4634 if (!devs_max)
4635 devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4636 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4637 /* for larger filesystems, use larger metadata chunks */
4638 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
4639 max_stripe_size = SZ_1G;
4640 else
4641 max_stripe_size = SZ_256M;
4642 max_chunk_size = max_stripe_size;
4643 if (!devs_max)
4644 devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4645 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4646 max_stripe_size = SZ_32M;
4647 max_chunk_size = 2 * max_stripe_size;
4648 if (!devs_max)
4649 devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
4650 } else {
4651 btrfs_err(info, "invalid chunk type 0x%llx requested",
4652 type);
4653 BUG_ON(1);
4654 }
4655
4656 /* we don't want a chunk larger than 10% of writeable space */
4657 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4658 max_chunk_size);
4659
4660 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
4661 GFP_NOFS);
4662 if (!devices_info)
4663 return -ENOMEM;
4664
4665 cur = fs_devices->alloc_list.next;
4666
4667 /*
4668 * in the first pass through the devices list, we gather information
4669 * about the available holes on each device.
4670 */
4671 ndevs = 0;
4672 while (cur != &fs_devices->alloc_list) {
4673 struct btrfs_device *device;
4674 u64 max_avail;
4675 u64 dev_offset;
4676
4677 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
4678
4679 cur = cur->next;
4680
4681 if (!device->writeable) {
4682 WARN(1, KERN_ERR
4683 "BTRFS: read-only device in alloc_list\n");
4684 continue;
4685 }
4686
4687 if (!device->in_fs_metadata ||
4688 device->is_tgtdev_for_dev_replace)
4689 continue;
4690
4691 if (device->total_bytes > device->bytes_used)
4692 total_avail = device->total_bytes - device->bytes_used;
4693 else
4694 total_avail = 0;
4695
4696 /* If there is no space on this device, skip it. */
4697 if (total_avail == 0)
4698 continue;
4699
4700 ret = find_free_dev_extent(trans, device,
4701 max_stripe_size * dev_stripes,
4702 &dev_offset, &max_avail);
4703 if (ret && ret != -ENOSPC)
4704 goto error;
4705
4706 if (ret == 0)
4707 max_avail = max_stripe_size * dev_stripes;
4708
4709 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
4710 continue;
4711
4712 if (ndevs == fs_devices->rw_devices) {
4713 WARN(1, "%s: found more than %llu devices\n",
4714 __func__, fs_devices->rw_devices);
4715 break;
4716 }
4717 devices_info[ndevs].dev_offset = dev_offset;
4718 devices_info[ndevs].max_avail = max_avail;
4719 devices_info[ndevs].total_avail = total_avail;
4720 devices_info[ndevs].dev = device;
4721 ++ndevs;
4722 }
4723
4724 /*
4725 * now sort the devices by hole size / available space
4726 */
4727 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4728 btrfs_cmp_device_info, NULL);
4729
4730 /* round down to number of usable stripes */
4731 ndevs -= ndevs % devs_increment;
4732
4733 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
4734 ret = -ENOSPC;
4735 goto error;
4736 }
4737
4738 if (devs_max && ndevs > devs_max)
4739 ndevs = devs_max;
4740 /*
4741 * the primary goal is to maximize the number of stripes, so use as many
4742 * devices as possible, even if the stripes are not maximum sized.
4743 */
4744 stripe_size = devices_info[ndevs-1].max_avail;
4745 num_stripes = ndevs * dev_stripes;
4746
4747 /*
4748 * this will have to be fixed for RAID1 and RAID10 over
4749 * more drives
4750 */
4751 data_stripes = num_stripes / ncopies;
4752
4753 if (type & BTRFS_BLOCK_GROUP_RAID5) {
4754 raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
4755 extent_root->stripesize);
4756 data_stripes = num_stripes - 1;
4757 }
4758 if (type & BTRFS_BLOCK_GROUP_RAID6) {
4759 raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
4760 extent_root->stripesize);
4761 data_stripes = num_stripes - 2;
4762 }
4763
4764 /*
4765 * Use the number of data stripes to figure out how big this chunk
4766 * is really going to be in terms of logical address space,
4767 * and compare that answer with the max chunk size
4768 */
4769 if (stripe_size * data_stripes > max_chunk_size) {
4770 u64 mask = (1ULL << 24) - 1;
4771
4772 stripe_size = div_u64(max_chunk_size, data_stripes);
4773
4774 /* bump the answer up to a 16MB boundary */
4775 stripe_size = (stripe_size + mask) & ~mask;
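/*
 * e.g. with mask = 0xffffff a 20 MiB stripe_size becomes 32 MiB:
 * (x + mask) & ~mask aligns x up to the next 16 MiB multiple.
 */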
4776
4777 /* but don't go higher than the limits we found
4778 * while searching for free extents
4779 */
4780 if (stripe_size > devices_info[ndevs-1].max_avail)
4781 stripe_size = devices_info[ndevs-1].max_avail;
4782 }
4783
4784 stripe_size = div_u64(stripe_size, dev_stripes);
4785
4786 /* align to BTRFS_STRIPE_LEN */
4787 stripe_size = div_u64(stripe_size, raid_stripe_len);
4788 stripe_size *= raid_stripe_len;
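/*
 * The divide-then-multiply pair rounds stripe_size down to a whole
 * multiple of raid_stripe_len, e.g. 100000 bytes with a 64 KiB stripe
 * length becomes 65536.
 */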
4789
4790 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4791 if (!map) {
4792 ret = -ENOMEM;
4793 goto error;
4794 }
4795 map->num_stripes = num_stripes;
4796
4797 for (i = 0; i < ndevs; ++i) {
4798 for (j = 0; j < dev_stripes; ++j) {
4799 int s = i * dev_stripes + j;
4800 map->stripes[s].dev = devices_info[i].dev;
4801 map->stripes[s].physical = devices_info[i].dev_offset +
4802 j * stripe_size;
4803 }
4804 }
4805 map->sector_size = extent_root->sectorsize;
4806 map->stripe_len = raid_stripe_len;
4807 map->io_align = raid_stripe_len;
4808 map->io_width = raid_stripe_len;
4809 map->type = type;
4810 map->sub_stripes = sub_stripes;
4811
4812 num_bytes = stripe_size * data_stripes;
4813
4814 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
4815
4816 em = alloc_extent_map();
4817 if (!em) {
4818 kfree(map);
4819 ret = -ENOMEM;
4820 goto error;
4821 }
4822 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
4823 em->map_lookup = map;
4824 em->start = start;
4825 em->len = num_bytes;
4826 em->block_start = 0;
4827 em->block_len = em->len;
4828 em->orig_block_len = stripe_size;
4829
4830 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4831 write_lock(&em_tree->lock);
4832 ret = add_extent_mapping(em_tree, em, 0);
4833 if (!ret) {
4834 list_add_tail(&em->list, &trans->transaction->pending_chunks);
4835 atomic_inc(&em->refs);
4836 }
4837 write_unlock(&em_tree->lock);
4838 if (ret) {
4839 free_extent_map(em);
4840 goto error;
4841 }
4842
4843 ret = btrfs_make_block_group(trans, extent_root, 0, type,
4844 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4845 start, num_bytes);
4846 if (ret)
4847 goto error_del_extent;
4848
4849 for (i = 0; i < map->num_stripes; i++) {
4850 num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
4851 btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
4852 }
4853
4854 spin_lock(&extent_root->fs_info->free_chunk_lock);
4855 extent_root->fs_info->free_chunk_space -= (stripe_size *
4856 map->num_stripes);
4857 spin_unlock(&extent_root->fs_info->free_chunk_lock);
4858
4859 free_extent_map(em);
4860 check_raid56_incompat_flag(extent_root->fs_info, type);
4861
4862 kfree(devices_info);
4863 return 0;
4864
4865 error_del_extent:
4866 write_lock(&em_tree->lock);
4867 remove_extent_mapping(em_tree, em);
4868 write_unlock(&em_tree->lock);
4869
4870 /* One for our allocation */
4871 free_extent_map(em);
4872 /* One for the tree reference */
4873 free_extent_map(em);
4874 /* One for the pending_chunks list reference */
4875 free_extent_map(em);
4876 error:
4877 kfree(devices_info);
4878 return ret;
4879 }
4880
4881 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4882 struct btrfs_root *extent_root,
4883 u64 chunk_offset, u64 chunk_size)
4884 {
4885 struct btrfs_key key;
4886 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4887 struct btrfs_device *device;
4888 struct btrfs_chunk *chunk;
4889 struct btrfs_stripe *stripe;
4890 struct extent_map_tree *em_tree;
4891 struct extent_map *em;
4892 struct map_lookup *map;
4893 size_t item_size;
4894 u64 dev_offset;
4895 u64 stripe_size;
4896 int i = 0;
4897 int ret = 0;
4898
4899 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4900 read_lock(&em_tree->lock);
4901 em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
4902 read_unlock(&em_tree->lock);
4903
4904 if (!em) {
4905 btrfs_crit(extent_root->fs_info,
4906 "unable to find logical %Lu len %Lu", chunk_offset, chunk_size);
4907 return -EINVAL;
4908 }
4909
4910 if (em->start != chunk_offset || em->len != chunk_size) {
4911 btrfs_crit(extent_root->fs_info,
4912 "found a bad mapping, wanted %Lu-%Lu, found %Lu-%Lu",
4913 chunk_offset, chunk_size, em->start, em->len);
4914 free_extent_map(em);
4915 return -EINVAL;
4916 }
4917
4918 map = em->map_lookup;
4919 item_size = btrfs_chunk_item_size(map->num_stripes);
4920 stripe_size = em->orig_block_len;
4921
4922 chunk = kzalloc(item_size, GFP_NOFS);
4923 if (!chunk) {
4924 ret = -ENOMEM;
4925 goto out;
4926 }
4927
4928 /*
4929 * Take the device list mutex to prevent races with the final phase of
4930 * a device replace operation that replaces the device object associated
4931 * with the map's stripes, because the device object's id can change
4932 * at any time during that final phase of the device replace operation
4933 * (dev-replace.c:btrfs_dev_replace_finishing()).
4934 */
4935 mutex_lock(&chunk_root->fs_info->fs_devices->device_list_mutex);
4936 for (i = 0; i < map->num_stripes; i++) {
4937 device = map->stripes[i].dev;
4938 dev_offset = map->stripes[i].physical;
4939
4940 ret = btrfs_update_device(trans, device);
4941 if (ret)
4942 break;
4943 ret = btrfs_alloc_dev_extent(trans, device,
4944 chunk_root->root_key.objectid,
4945 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4946 chunk_offset, dev_offset,
4947 stripe_size);
4948 if (ret)
4949 break;
4950 }
4951 if (ret) {
4952 mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
4953 goto out;
4954 }
4955
4956 stripe = &chunk->stripe;
4957 for (i = 0; i < map->num_stripes; i++) {
4958 device = map->stripes[i].dev;
4959 dev_offset = map->stripes[i].physical;
4960
4961 btrfs_set_stack_stripe_devid(stripe, device->devid);
4962 btrfs_set_stack_stripe_offset(stripe, dev_offset);
4963 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4964 stripe++;
4965 }
4966 mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
4967
4968 btrfs_set_stack_chunk_length(chunk, chunk_size);
4969 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4970 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4971 btrfs_set_stack_chunk_type(chunk, map->type);
4972 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4973 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4974 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4975 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4976 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4977
4978 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4979 key.type = BTRFS_CHUNK_ITEM_KEY;
4980 key.offset = chunk_offset;
4981
4982 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4983 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4984 /*
4985 * TODO: Cleanup of inserted chunk root in case of
4986 * failure.
4987 */
4988 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4989 item_size);
4990 }
4991
4992 out:
4993 kfree(chunk);
4994 free_extent_map(em);
4995 return ret;
4996 }
4997
4998 /*
4999 * Chunk allocation falls into two parts. The first part does the work
5000 * that makes the newly allocated chunk usable, but does not perform any
5001 * operation that modifies the chunk tree. The second part does the work
5002 * that requires modifying the chunk tree. This division is important for the
5003 * bootstrap process of adding storage to a seed btrfs.
5004 */
5005 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
5006 struct btrfs_root *extent_root, u64 type)
5007 {
5008 u64 chunk_offset;
5009
5010 ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
5011 chunk_offset = find_next_chunk(extent_root->fs_info);
5012 return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
5013 }
5014
5015 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
5016 struct btrfs_root *root,
5017 struct btrfs_device *device)
5018 {
5019 u64 chunk_offset;
5020 u64 sys_chunk_offset;
5021 u64 alloc_profile;
5022 struct btrfs_fs_info *fs_info = root->fs_info;
5023 struct btrfs_root *extent_root = fs_info->extent_root;
5024 int ret;
5025
5026 chunk_offset = find_next_chunk(fs_info);
5027 alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
5028 ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
5029 alloc_profile);
5030 if (ret)
5031 return ret;
5032
5033 sys_chunk_offset = find_next_chunk(root->fs_info);
5034 alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
5035 ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
5036 alloc_profile);
5037 return ret;
5038 }
5039
5040 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5041 {
5042 int max_errors;
5043
5044 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
5045 BTRFS_BLOCK_GROUP_RAID10 |
5046 BTRFS_BLOCK_GROUP_RAID5 |
5047 BTRFS_BLOCK_GROUP_DUP)) {
5048 max_errors = 1;
5049 } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
5050 max_errors = 2;
5051 } else {
5052 max_errors = 0;
5053 }
5054
5055 return max_errors;
5056 }
5057
5058 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
5059 {
5060 struct extent_map *em;
5061 struct map_lookup *map;
5062 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
5063 int readonly = 0;
5064 int miss_ndevs = 0;
5065 int i;
5066
5067 read_lock(&map_tree->map_tree.lock);
5068 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
5069 read_unlock(&map_tree->map_tree.lock);
5070 if (!em)
5071 return 1;
5072
5073 map = em->map_lookup;
5074 for (i = 0; i < map->num_stripes; i++) {
5075 if (map->stripes[i].dev->missing) {
5076 miss_ndevs++;
5077 continue;
5078 }
5079
5080 if (!map->stripes[i].dev->writeable) {
5081 readonly = 1;
5082 goto end;
5083 }
5084 }
5085
5086 /*
5087 * If the number of missing devices is larger than max errors,
5088 * we cannot write data into that chunk successfully, so
5089 * mark it read-only.
5090 */
5091 if (miss_ndevs > btrfs_chunk_max_errors(map))
5092 readonly = 1;
5093 end:
5094 free_extent_map(em);
5095 return readonly;
5096 }
5097
5098 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
5099 {
5100 extent_map_tree_init(&tree->map_tree);
5101 }
5102
5103 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
5104 {
5105 struct extent_map *em;
5106
5107 while (1) {
5108 write_lock(&tree->map_tree.lock);
5109 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
5110 if (em)
5111 remove_extent_mapping(&tree->map_tree, em);
5112 write_unlock(&tree->map_tree.lock);
5113 if (!em)
5114 break;
5115 /* once for us */
5116 free_extent_map(em);
5117 /* once for the tree */
5118 free_extent_map(em);
5119 }
5120 }
5121
5122 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5123 {
5124 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
5125 struct extent_map *em;
5126 struct map_lookup *map;
5127 struct extent_map_tree *em_tree = &map_tree->map_tree;
5128 int ret;
5129
5130 read_lock(&em_tree->lock);
5131 em = lookup_extent_mapping(em_tree, logical, len);
5132 read_unlock(&em_tree->lock);
5133
5134 /*
5135 * We could return errors for these cases, but that could get ugly and
5136 * we'd probably end up doing the same thing anyway: nothing else, then
5137 * exit. So return 1 so the callers don't try to use other copies.
5138 */
5139 if (!em) {
5140 btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
5141 logical+len);
5142 return 1;
5143 }
5144
5145 if (em->start > logical || em->start + em->len < logical) {
5146 btrfs_crit(fs_info,
5147 "Invalid mapping for %Lu-%Lu, got %Lu-%Lu",
5148 logical, logical+len, em->start, em->start + em->len);
5149 free_extent_map(em);
5150 return 1;
5151 }
5152
5153 map = em->map_lookup;
5154 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
5155 ret = map->num_stripes;
5156 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5157 ret = map->sub_stripes;
5158 else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5159 ret = 2;
5160 else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5161 ret = 3;
5162 else
5163 ret = 1;
5164 free_extent_map(em);
5165
5166 btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
5167 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
5168 ret++;
5169 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
5170
5171 return ret;
5172 }
5173
5174 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
5175 struct btrfs_mapping_tree *map_tree,
5176 u64 logical)
5177 {
5178 struct extent_map *em;
5179 struct map_lookup *map;
5180 struct extent_map_tree *em_tree = &map_tree->map_tree;
5181 unsigned long len = root->sectorsize;
5182
5183 read_lock(&em_tree->lock);
5184 em = lookup_extent_mapping(em_tree, logical, len);
5185 read_unlock(&em_tree->lock);
5186 BUG_ON(!em);
5187
5188 BUG_ON(em->start > logical || em->start + em->len < logical);
5189 map = em->map_lookup;
5190 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5191 len = map->stripe_len * nr_data_stripes(map);
5192 free_extent_map(em);
5193 return len;
5194 }
5195
5196 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
5197 u64 logical, u64 len, int mirror_num)
5198 {
5199 struct extent_map *em;
5200 struct map_lookup *map;
5201 struct extent_map_tree *em_tree = &map_tree->map_tree;
5202 int ret = 0;
5203
5204 read_lock(&em_tree->lock);
5205 em = lookup_extent_mapping(em_tree, logical, len);
5206 read_unlock(&em_tree->lock);
5207 BUG_ON(!em);
5208
5209 BUG_ON(em->start > logical || em->start + em->len < logical);
5210 map = em->map_lookup;
5211 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5212 ret = 1;
5213 free_extent_map(em);
5214 return ret;
5215 }
5216
5217 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5218 struct map_lookup *map, int first, int num,
5219 int optimal, int dev_replace_is_ongoing)
5220 {
5221 int i;
5222 int tolerance;
5223 struct btrfs_device *srcdev;
5224
5225 if (dev_replace_is_ongoing &&
5226 fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5227 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5228 srcdev = fs_info->dev_replace.srcdev;
5229 else
5230 srcdev = NULL;
5231
5232 /*
5233 * try to avoid the drive that is the source drive for a
5234 * dev-replace procedure; only choose it if no other non-missing
5235 * mirror is available
5236 */
5237 for (tolerance = 0; tolerance < 2; tolerance++) {
5238 if (map->stripes[optimal].dev->bdev &&
5239 (tolerance || map->stripes[optimal].dev != srcdev))
5240 return optimal;
5241 for (i = first; i < first + num; i++) {
5242 if (map->stripes[i].dev->bdev &&
5243 (tolerance || map->stripes[i].dev != srcdev))
5244 return i;
5245 }
5246 }
5247
5248 /* we couldn't find one that doesn't fail. Just return something
5249 * and the I/O error handling code will clean up eventually
5250 */
5251 return optimal;
5252 }
5253
5254 static inline int parity_smaller(u64 a, u64 b)
5255 {
5256 return a > b;
5257 }
5258
5259 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5260 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5261 {
5262 struct btrfs_bio_stripe s;
5263 int i;
5264 u64 l;
5265 int again = 1;
5266
5267 while (again) {
5268 again = 0;
5269 for (i = 0; i < num_stripes - 1; i++) {
5270 if (parity_smaller(bbio->raid_map[i],
5271 bbio->raid_map[i+1])) {
5272 s = bbio->stripes[i];
5273 l = bbio->raid_map[i];
5274 bbio->stripes[i] = bbio->stripes[i+1];
5275 bbio->raid_map[i] = bbio->raid_map[i+1];
5276 bbio->stripes[i+1] = s;
5277 bbio->raid_map[i+1] = l;
5278
5279 again = 1;
5280 }
5281 }
5282 }
5283 }
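/*
 * Bubble sort is fine here: num_stripes is small, and the parity and
 * syndrome markers (RAID5_P_STRIPE and RAID6_Q_STRIPE, the two largest
 * u64 values) naturally sink to the end of the raid_map.
 */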
5284
5285 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5286 {
5287 struct btrfs_bio *bbio = kzalloc(
5288 /* the size of the btrfs_bio */
5289 sizeof(struct btrfs_bio) +
5290 /* plus the variable array for the stripes */
5291 sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5292 /* plus the variable array for the tgt dev */
5293 sizeof(int) * (real_stripes) +
5294 /*
5295 * plus the raid_map, which includes both the tgt dev
5296 * and the stripes
5297 */
5298 sizeof(u64) * (total_stripes),
5299 GFP_NOFS|__GFP_NOFAIL);
5300
5301 atomic_set(&bbio->error, 0);
5302 atomic_set(&bbio->refs, 1);
5303
5304 return bbio;
5305 }
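/*
 * Everything lives in one allocation, laid out back to back as
 * [struct btrfs_bio][stripes][tgtdev_map][raid_map]; callers derive
 * the tgtdev_map and raid_map pointers from the stripes array, as
 * __btrfs_map_block() does below.
 */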
5306
5307 void btrfs_get_bbio(struct btrfs_bio *bbio)
5308 {
5309 WARN_ON(!atomic_read(&bbio->refs));
5310 atomic_inc(&bbio->refs);
5311 }
5312
5313 void btrfs_put_bbio(struct btrfs_bio *bbio)
5314 {
5315 if (!bbio)
5316 return;
5317 if (atomic_dec_and_test(&bbio->refs))
5318 kfree(bbio);
5319 }
5320
5321 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
5322 u64 logical, u64 *length,
5323 struct btrfs_bio **bbio_ret,
5324 int mirror_num, int need_raid_map)
5325 {
5326 struct extent_map *em;
5327 struct map_lookup *map;
5328 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
5329 struct extent_map_tree *em_tree = &map_tree->map_tree;
5330 u64 offset;
5331 u64 stripe_offset;
5332 u64 stripe_end_offset;
5333 u64 stripe_nr;
5334 u64 stripe_nr_orig;
5335 u64 stripe_nr_end;
5336 u64 stripe_len;
5337 u32 stripe_index;
5338 int i;
5339 int ret = 0;
5340 int num_stripes;
5341 int max_errors = 0;
5342 int tgtdev_indexes = 0;
5343 struct btrfs_bio *bbio = NULL;
5344 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
5345 int dev_replace_is_ongoing = 0;
5346 int num_alloc_stripes;
5347 int patch_the_first_stripe_for_dev_replace = 0;
5348 u64 physical_to_patch_in_first_stripe = 0;
5349 u64 raid56_full_stripe_start = (u64)-1;
5350
5351 read_lock(&em_tree->lock);
5352 em = lookup_extent_mapping(em_tree, logical, *length);
5353 read_unlock(&em_tree->lock);
5354
5355 if (!em) {
5356 btrfs_crit(fs_info, "unable to find logical %llu len %llu",
5357 logical, *length);
5358 return -EINVAL;
5359 }
5360
5361 if (em->start > logical || em->start + em->len < logical) {
5362 btrfs_crit(fs_info,
5363 "found a bad mapping, wanted %Lu, found %Lu-%Lu",
5364 logical, em->start, em->start + em->len);
5365 free_extent_map(em);
5366 return -EINVAL;
5367 }
5368
5369 map = em->map_lookup;
5370 offset = logical - em->start;
5371
5372 stripe_len = map->stripe_len;
5373 stripe_nr = offset;
5374 /*
5375 * stripe_nr counts the total number of stripes we have to stride
5376 * to get to this block
5377 */
5378 stripe_nr = div64_u64(stripe_nr, stripe_len);
5379
5380 stripe_offset = stripe_nr * stripe_len;
5381 if (offset < stripe_offset) {
5382 btrfs_crit(fs_info,
5383 "stripe math has gone wrong, stripe_offset=%llu, offset=%llu, start=%llu, logical=%llu, stripe_len=%llu",
5384 stripe_offset, offset, em->start, logical, stripe_len);
5387 free_extent_map(em);
5388 return -EINVAL;
5389 }
5390
5391 /* stripe_offset is the offset of this block in its stripe */
5392 stripe_offset = offset - stripe_offset;
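/*
 * Worked example, assuming the default 64 KiB stripe_len: an offset of
 * 300 KiB gives stripe_nr = 300K / 64K = 4 and
 * stripe_offset = 300K - 4 * 64K = 44 KiB into that stripe.
 */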
5393
5394 /* if we're here for raid56, we need to know the stripe aligned start */
5395 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5396 unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
5397 raid56_full_stripe_start = offset;
5398
5399 /* allow a write of a full stripe, but make sure we don't
5400 * allow straddling of stripes
5401 */
5402 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
5403 full_stripe_len);
5404 raid56_full_stripe_start *= full_stripe_len;
5405 }
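/*
 * e.g. with 4 data stripes and a 64 KiB stripe_len the full stripe is
 * 256 KiB wide, so an offset of 300 KiB rounds down to a
 * raid56_full_stripe_start of 256 KiB.
 */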
5406
5407 if (op == REQ_OP_DISCARD) {
5408 /* we don't discard raid56 yet */
5409 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5410 ret = -EOPNOTSUPP;
5411 goto out;
5412 }
5413 *length = min_t(u64, em->len - offset, *length);
5414 } else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
5415 u64 max_len;
5416 /* For writes to RAID[56], allow a full stripeset across all disks.
5417 * For other RAID types and for RAID[56] reads, just allow a single
5418 * stripe (on a single disk). */
5419 if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
5420 (op == REQ_OP_WRITE)) {
5421 max_len = stripe_len * nr_data_stripes(map) -
5422 (offset - raid56_full_stripe_start);
5423 } else {
5424 /* we limit the length of each bio to what fits in a stripe */
5425 max_len = stripe_len - stripe_offset;
5426 }
5427 *length = min_t(u64, em->len - offset, max_len);
5428 } else {
5429 *length = em->len - offset;
5430 }
5431
5432 /* This is for when we're called from btrfs_merge_bio_hook() and all
5433 * it cares about is the length */
5434 if (!bbio_ret)
5435 goto out;
5436
5437 btrfs_dev_replace_lock(dev_replace, 0);
5438 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
5439 if (!dev_replace_is_ongoing)
5440 btrfs_dev_replace_unlock(dev_replace, 0);
5441 else
5442 btrfs_dev_replace_set_lock_blocking(dev_replace);
5443
5444 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
5445 op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
5446 op != REQ_GET_READ_MIRRORS && dev_replace->tgtdev != NULL) {
5447 /*
5448 * in dev-replace case, for repair case (that's the only
5449 * case where the mirror is selected explicitly when
5450 * calling btrfs_map_block), blocks left of the left cursor
5451 * can also be read from the target drive.
5452 * For REQ_GET_READ_MIRRORS, the target drive is added as
5453 * the last one to the array of stripes. For READ, it also
5454 * needs to be supported using the same mirror number.
5455 * If the requested block is not left of the left cursor,
5456 * EIO is returned. This can happen because btrfs_num_copies()
5457 * returns one more in the dev-replace case.
5458 */
5459 u64 tmp_length = *length;
5460 struct btrfs_bio *tmp_bbio = NULL;
5461 int tmp_num_stripes;
5462 u64 srcdev_devid = dev_replace->srcdev->devid;
5463 int index_srcdev = 0;
5464 int found = 0;
5465 u64 physical_of_found = 0;
5466
5467 ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
5468 logical, &tmp_length, &tmp_bbio, 0, 0);
5469 if (ret) {
5470 WARN_ON(tmp_bbio != NULL);
5471 goto out;
5472 }
5473
5474 tmp_num_stripes = tmp_bbio->num_stripes;
5475 if (mirror_num > tmp_num_stripes) {
5476 /*
5477 * REQ_GET_READ_MIRRORS does not contain this
5478 * mirror, that means that the requested area
5479 * is not left of the left cursor
5480 */
5481 ret = -EIO;
5482 btrfs_put_bbio(tmp_bbio);
5483 goto out;
5484 }
5485
5486 /*
5487 * process the rest of the function using the mirror_num
5488 * of the source drive. Therefore look it up first.
5489 * At the end, patch the device pointer to the one of the
5490 * target drive.
5491 */
5492 for (i = 0; i < tmp_num_stripes; i++) {
5493 if (tmp_bbio->stripes[i].dev->devid != srcdev_devid)
5494 continue;
5495
5496 /*
5497 * In case of DUP, in order to keep it simple, only add
5498 * the mirror with the lowest physical address
5499 */
5500 if (found &&
5501 physical_of_found <= tmp_bbio->stripes[i].physical)
5502 continue;
5503
5504 index_srcdev = i;
5505 found = 1;
5506 physical_of_found = tmp_bbio->stripes[i].physical;
5507 }
5508
5509 btrfs_put_bbio(tmp_bbio);
5510
5511 if (!found) {
5512 WARN_ON(1);
5513 ret = -EIO;
5514 goto out;
5515 }
5516
5517 mirror_num = index_srcdev + 1;
5518 patch_the_first_stripe_for_dev_replace = 1;
5519 physical_to_patch_in_first_stripe = physical_of_found;
5520 } else if (mirror_num > map->num_stripes) {
5521 mirror_num = 0;
5522 }
5523
5524 num_stripes = 1;
5525 stripe_index = 0;
5526 stripe_nr_orig = stripe_nr;
5527 stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
5528 stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len);
5529 stripe_end_offset = stripe_nr_end * map->stripe_len -
5530 (offset + *length);
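/*
 * e.g. with a 64 KiB stripe_len, offset = 10 KiB and *length = 100 KiB:
 * stripe_nr_end = ALIGN(110K, 64K) / 64K = 2 and
 * stripe_end_offset = 2 * 64K - 110K = 18 KiB, the unused tail of the
 * last stripe touched by the range.
 */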
5531
5532 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5533 if (op == REQ_OP_DISCARD)
5534 num_stripes = min_t(u64, map->num_stripes,
5535 stripe_nr_end - stripe_nr_orig);
5536 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5537 &stripe_index);
5538 if (op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
5539 op != REQ_GET_READ_MIRRORS)
5540 mirror_num = 1;
5541 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
5542 if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD ||
5543 op == REQ_GET_READ_MIRRORS)
5544 num_stripes = map->num_stripes;
5545 else if (mirror_num)
5546 stripe_index = mirror_num - 1;
5547 else {
5548 stripe_index = find_live_mirror(fs_info, map, 0,
5549 map->num_stripes,
5550 current->pid % map->num_stripes,
5551 dev_replace_is_ongoing);
5552 mirror_num = stripe_index + 1;
5553 }
5554
5555 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
5556 if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD ||
5557 op == REQ_GET_READ_MIRRORS) {
5558 num_stripes = map->num_stripes;
5559 } else if (mirror_num) {
5560 stripe_index = mirror_num - 1;
5561 } else {
5562 mirror_num = 1;
5563 }
5564
5565 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5566 u32 factor = map->num_stripes / map->sub_stripes;
5567
5568 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5569 stripe_index *= map->sub_stripes;
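/*
 * e.g. RAID10 with num_stripes = 4 and sub_stripes = 2 gives
 * factor = 2: stripe_nr now selects the mirror pair and stripe_index
 * points at the first copy within that pair.
 */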
5570
5571 if (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS)
5572 num_stripes = map->sub_stripes;
5573 else if (op == REQ_OP_DISCARD)
5574 num_stripes = min_t(u64, map->sub_stripes *
5575 (stripe_nr_end - stripe_nr_orig),
5576 map->num_stripes);
5577 else if (mirror_num)
5578 stripe_index += mirror_num - 1;
5579 else {
5580 int old_stripe_index = stripe_index;
5581 stripe_index = find_live_mirror(fs_info, map,
5582 stripe_index,
5583 map->sub_stripes, stripe_index +
5584 current->pid % map->sub_stripes,
5585 dev_replace_is_ongoing);
5586 mirror_num = stripe_index - old_stripe_index + 1;
5587 }
5588
5589 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5590 if (need_raid_map &&
5591 (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS ||
5592 mirror_num > 1)) {
5593 /* push stripe_nr back to the start of the full stripe */
5594 stripe_nr = div_u64(raid56_full_stripe_start,
5595 stripe_len * nr_data_stripes(map));
5596
5597 /* RAID[56] write or recovery. Return all stripes */
5598 num_stripes = map->num_stripes;
5599 max_errors = nr_parity_stripes(map);
5600
5601 *length = map->stripe_len;
5602 stripe_index = 0;
5603 stripe_offset = 0;
5604 } else {
5605 /*
5606 * Mirror #0 or #1 means the original data block.
5607 * Mirror #2 is RAID5 parity block.
5608 * Mirror #3 is RAID6 Q block.
5609 */
5610 stripe_nr = div_u64_rem(stripe_nr,
5611 nr_data_stripes(map), &stripe_index);
5612 if (mirror_num > 1)
5613 stripe_index = nr_data_stripes(map) +
5614 mirror_num - 2;
5615
5616 /* We distribute the parity blocks across stripes */
5617 div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
5618 &stripe_index);
5619 if ((op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
5620 op != REQ_GET_READ_MIRRORS) && mirror_num <= 1)
5621 mirror_num = 1;
5622 }
5623 } else {
5624 /*
5625 * after this, stripe_nr is the number of stripes on this
5626 * device we have to walk to find the data, and stripe_index is
5627 * the number of our device in the stripe array
5628 */
5629 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5630 &stripe_index);
5631 mirror_num = stripe_index + 1;
5632 }
5633 if (stripe_index >= map->num_stripes) {
5634 btrfs_crit(fs_info,
5635 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
5636 stripe_index, map->num_stripes);
5637 ret = -EINVAL;
5638 goto out;
5639 }
5640
5641 num_alloc_stripes = num_stripes;
5642 if (dev_replace_is_ongoing) {
5643 if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD)
5644 num_alloc_stripes <<= 1;
5645 if (op == REQ_GET_READ_MIRRORS)
5646 num_alloc_stripes++;
5647 tgtdev_indexes = num_stripes;
5648 }
5649
5650 bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
5651 if (!bbio) {
5652 ret = -ENOMEM;
5653 goto out;
5654 }
5655 if (dev_replace_is_ongoing)
5656 bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
5657
5658 /* build raid_map */
5659 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
5660 need_raid_map &&
5661 ((op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS) ||
5662 mirror_num > 1)) {
5663 u64 tmp;
5664 unsigned rot;
5665
5666 bbio->raid_map = (u64 *)((void *)bbio->stripes +
5667 sizeof(struct btrfs_bio_stripe) *
5668 num_alloc_stripes +
5669 sizeof(int) * tgtdev_indexes);
5670
5671 /* Work out the disk rotation on this stripe-set */
5672 div_u64_rem(stripe_nr, num_stripes, &rot);
5673
5674 /* Fill in the logical address of each stripe */
5675 tmp = stripe_nr * nr_data_stripes(map);
5676 for (i = 0; i < nr_data_stripes(map); i++)
5677 bbio->raid_map[(i+rot) % num_stripes] =
5678 em->start + (tmp + i) * map->stripe_len;
5679
5680 bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
5681 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5682 bbio->raid_map[(i+rot+1) % num_stripes] =
5683 RAID6_Q_STRIPE;
5684 }
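/*
 * Rotation example, RAID5 over 4 devices (3 data + P): for
 * stripe_nr = 1, rot = 1, so raid_map[1..3] receive the data stripes'
 * logical addresses and slot 0 gets RAID5_P_STRIPE; the parity thus
 * rotates one slot per full stripe.
 */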
5685
5686 if (op == REQ_OP_DISCARD) {
5687 u32 factor = 0;
5688 u32 sub_stripes = 0;
5689 u64 stripes_per_dev = 0;
5690 u32 remaining_stripes = 0;
5691 u32 last_stripe = 0;
5692
5693 if (map->type &
5694 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
5695 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5696 sub_stripes = 1;
5697 else
5698 sub_stripes = map->sub_stripes;
5699
5700 factor = map->num_stripes / sub_stripes;
5701 stripes_per_dev = div_u64_rem(stripe_nr_end -
5702 stripe_nr_orig,
5703 factor,
5704 &remaining_stripes);
5705 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5706 last_stripe *= sub_stripes;
5707 }
5708
5709 for (i = 0; i < num_stripes; i++) {
5710 bbio->stripes[i].physical =
5711 map->stripes[stripe_index].physical +
5712 stripe_offset + stripe_nr * map->stripe_len;
5713 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5714
5715 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5716 BTRFS_BLOCK_GROUP_RAID10)) {
5717 bbio->stripes[i].length = stripes_per_dev *
5718 map->stripe_len;
5719
5720 if (i / sub_stripes < remaining_stripes)
5721 bbio->stripes[i].length +=
5722 map->stripe_len;
5723
5724 /*
5725 * Special for the first stripe and
5726 * the last stripe:
5727 *
5728 * |-------|...|-------|
5729 * |----------|
5730 * off end_off
5731 */
5732 if (i < sub_stripes)
5733 bbio->stripes[i].length -=
5734 stripe_offset;
5735
5736 if (stripe_index >= last_stripe &&
5737 stripe_index <= (last_stripe +
5738 sub_stripes - 1))
5739 bbio->stripes[i].length -=
5740 stripe_end_offset;
5741
5742 if (i == sub_stripes - 1)
5743 stripe_offset = 0;
5744 } else
5745 bbio->stripes[i].length = *length;
5746
5747 stripe_index++;
5748 if (stripe_index == map->num_stripes) {
5749 /* This could only happen for RAID0/10 */
5750 stripe_index = 0;
5751 stripe_nr++;
5752 }
5753 }
5754 } else {
5755 for (i = 0; i < num_stripes; i++) {
5756 bbio->stripes[i].physical =
5757 map->stripes[stripe_index].physical +
5758 stripe_offset +
5759 stripe_nr * map->stripe_len;
5760 bbio->stripes[i].dev =
5761 map->stripes[stripe_index].dev;
5762 stripe_index++;
5763 }
5764 }
5765
5766 if (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS)
5767 max_errors = btrfs_chunk_max_errors(map);
5768
5769 if (bbio->raid_map)
5770 sort_parity_stripes(bbio, num_stripes);
5771
5772 tgtdev_indexes = 0;
5773 if (dev_replace_is_ongoing &&
5774 (op == REQ_OP_WRITE || op == REQ_OP_DISCARD) &&
5775 dev_replace->tgtdev != NULL) {
5776 int index_where_to_add;
5777 u64 srcdev_devid = dev_replace->srcdev->devid;
5778
5779 /*
5780 * duplicate the write operations while the dev replace
5781 * procedure is running. Since the copying of the old disk
5782 * to the new disk takes place at run time while the
5783 * filesystem is mounted writable, the regular write
5784 * operations to the old disk have to be duplicated to go
5785 * to the new disk as well.
5786 * Note that device->missing is handled by the caller, and
5787 * that the write to the old disk is already set up in the
5788 * stripes array.
5789 */
5790 index_where_to_add = num_stripes;
5791 for (i = 0; i < num_stripes; i++) {
5792 if (bbio->stripes[i].dev->devid == srcdev_devid) {
5793 /* write to new disk, too */
5794 struct btrfs_bio_stripe *new =
5795 bbio->stripes + index_where_to_add;
5796 struct btrfs_bio_stripe *old =
5797 bbio->stripes + i;
5798
5799 new->physical = old->physical;
5800 new->length = old->length;
5801 new->dev = dev_replace->tgtdev;
5802 bbio->tgtdev_map[i] = index_where_to_add;
5803 index_where_to_add++;
5804 max_errors++;
5805 tgtdev_indexes++;
5806 }
5807 }
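/*
 * The loop above appended, for every stripe aimed at the source device,
 * a duplicate entry pointing at the replacement target; tgtdev_map[i]
 * remembers where each duplicate went so the pair can be matched up
 * again later.  num_stripes is bumped below to cover the appended
 * entries.
 */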
5808 num_stripes = index_where_to_add;
5809 } else if (dev_replace_is_ongoing && (op == REQ_GET_READ_MIRRORS) &&
5810 dev_replace->tgtdev != NULL) {
5811 u64 srcdev_devid = dev_replace->srcdev->devid;
5812 int index_srcdev = 0;
5813 int found = 0;
5814 u64 physical_of_found = 0;
5815
5816 /*
5817 * During the dev-replace procedure, the target drive can
5818 * also be used to read data in case it is needed to repair
5819 * a corrupt block elsewhere. This is possible if the
5820 * requested area is left of the left cursor. In this area,
5821 * the target drive is a full copy of the source drive.
5822 */
5823 for (i = 0; i < num_stripes; i++) {
5824 if (bbio->stripes[i].dev->devid == srcdev_devid) {
5825 /*
5826 * In case of DUP, in order to keep it
5827 * simple, only add the mirror with the
5828 * lowest physical address
5829 */
5830 if (found &&
5831 physical_of_found <=
5832 bbio->stripes[i].physical)
5833 continue;
5834 index_srcdev = i;
5835 found = 1;
5836 physical_of_found = bbio->stripes[i].physical;
5837 }
5838 }
5839 if (found) {
5840 struct btrfs_bio_stripe *tgtdev_stripe =
5841 bbio->stripes + num_stripes;
5842
5843 tgtdev_stripe->physical = physical_of_found;
5844 tgtdev_stripe->length =
5845 bbio->stripes[index_srcdev].length;
5846 tgtdev_stripe->dev = dev_replace->tgtdev;
5847 bbio->tgtdev_map[index_srcdev] = num_stripes;
5848
5849 tgtdev_indexes++;
5850 num_stripes++;
5851 }
5852 }
5853
5854 *bbio_ret = bbio;
5855 bbio->map_type = map->type;
5856 bbio->num_stripes = num_stripes;
5857 bbio->max_errors = max_errors;
5858 bbio->mirror_num = mirror_num;
5859 bbio->num_tgtdevs = tgtdev_indexes;
5860
5861 /*
5862 * this is the case that REQ_READ && dev_replace_is_ongoing &&
5863 * mirror_num == num_stripes + 1 && dev_replace target drive is
5864 * available as a mirror
5865 */
5866 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
5867 WARN_ON(num_stripes > 1);
5868 bbio->stripes[0].dev = dev_replace->tgtdev;
5869 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
5870 bbio->mirror_num = map->num_stripes + 1;
5871 }
5872 out:
5873 if (dev_replace_is_ongoing) {
5874 btrfs_dev_replace_clear_lock_blocking(dev_replace);
5875 btrfs_dev_replace_unlock(dev_replace, 0);
5876 }
5877 free_extent_map(em);
5878 return ret;
5879 }
5880
5881 int btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
5882 u64 logical, u64 *length,
5883 struct btrfs_bio **bbio_ret, int mirror_num)
5884 {
5885 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
5886 mirror_num, 0);
5887 }
5888
5889 /* For Scrub/replace */
5890 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op,
5891 u64 logical, u64 *length,
5892 struct btrfs_bio **bbio_ret, int mirror_num,
5893 int need_raid_map)
5894 {
5895 return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
5896 mirror_num, need_raid_map);
5897 }
5898
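/*
 * Usage sketch (illustrative only; the local names and the use() helper
 * are made up): a caller that needs to know which device and physical
 * offset back a logical address does roughly
 *
 *	u64 len = 4096;
 *	struct btrfs_bio *bbio = NULL;
 *	int ret;
 *
 *	ret = btrfs_map_block(fs_info, REQ_OP_READ, logical, &len,
 *			      &bbio, 0);
 *	if (ret == 0) {
 *		use(bbio->stripes[0].dev, bbio->stripes[0].physical);
 *		btrfs_put_bbio(bbio);
 *	}
 *
 * On return, *len is trimmed so the range does not cross a stripe
 * boundary, and the bbio must be dropped with btrfs_put_bbio().
 */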
5899 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
5900 u64 chunk_start, u64 physical, u64 devid,
5901 u64 **logical, int *naddrs, int *stripe_len)
5902 {
5903 struct extent_map_tree *em_tree = &map_tree->map_tree;
5904 struct extent_map *em;
5905 struct map_lookup *map;
5906 u64 *buf;
5907 u64 bytenr;
5908 u64 length;
5909 u64 stripe_nr;
5910 u64 rmap_len;
5911 int i, j, nr = 0;
5912
5913 read_lock(&em_tree->lock);
5914 em = lookup_extent_mapping(em_tree, chunk_start, 1);
5915 read_unlock(&em_tree->lock);
5916
5917 if (!em) {
5918 printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n",
5919 chunk_start);
5920 return -EIO;
5921 }
5922
5923 if (em->start != chunk_start) {
5924 printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n",
5925 em->start, chunk_start);
5926 free_extent_map(em);
5927 return -EIO;
5928 }
5929 map = em->map_lookup;
5930
5931 length = em->len;
5932 rmap_len = map->stripe_len;
5933
5934 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5935 length = div_u64(length, map->num_stripes / map->sub_stripes);
5936 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5937 length = div_u64(length, map->num_stripes);
5938 else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5939 length = div_u64(length, nr_data_stripes(map));
5940 rmap_len = map->stripe_len * nr_data_stripes(map);
5941 }
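/*
 * Illustration (numbers invented): a 2GiB RAID0 chunk across two devices
 * occupies 1GiB of physical space on each device, so the per-stripe
 * search window below is em->len / num_stripes.  For RAID10 a
 * sub_stripes-sized group of mirrors behaves like one RAID0 stripe, and
 * for RAID5/6 only the data stripes count, with rmap_len widened to a
 * full data stripe so that one physical stripe maps back to a single
 * logical address per full stripe.
 */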
5942
5943 buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
5944 if (!buf) {
	free_extent_map(em);
	return -ENOMEM;
}
5945
5946 for (i = 0; i < map->num_stripes; i++) {
5947 if (devid && map->stripes[i].dev->devid != devid)
5948 continue;
5949 if (map->stripes[i].physical > physical ||
5950 map->stripes[i].physical + length <= physical)
5951 continue;
5952
5953 stripe_nr = physical - map->stripes[i].physical;
5954 stripe_nr = div_u64(stripe_nr, map->stripe_len);
5955
5956 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5957 stripe_nr = stripe_nr * map->num_stripes + i;
5958 stripe_nr = div_u64(stripe_nr, map->sub_stripes);
5959 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5960 stripe_nr = stripe_nr * map->num_stripes + i;
5961 } /* else if RAID[56], multiply by nr_data_stripes().
5962 * Alternatively, just use rmap_len below instead of
5963 * map->stripe_len */
5964
5965 bytenr = chunk_start + stripe_nr * rmap_len;
5966 WARN_ON(nr >= map->num_stripes);
5967 for (j = 0; j < nr; j++) {
5968 if (buf[j] == bytenr)
5969 break;
5970 }
5971 if (j == nr) {
5972 WARN_ON(nr >= map->num_stripes);
5973 buf[nr++] = bytenr;
5974 }
5975 }
5976
5977 *logical = buf;
5978 *naddrs = nr;
5979 *stripe_len = rmap_len;
5980
5981 free_extent_map(em);
5982 return 0;
5983 }
5984
5985 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
5986 {
5987 bio->bi_private = bbio->private;
5988 bio->bi_end_io = bbio->end_io;
5989 bio_endio(bio);
5990
5991 btrfs_put_bbio(bbio);
5992 }
5993
5994 static void btrfs_end_bio(struct bio *bio)
5995 {
5996 struct btrfs_bio *bbio = bio->bi_private;
5997 int is_orig_bio = 0;
5998
5999 if (bio->bi_error) {
6000 atomic_inc(&bbio->error);
6001 if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
6002 unsigned int stripe_index =
6003 btrfs_io_bio(bio)->stripe_index;
6004 struct btrfs_device *dev;
6005
6006 BUG_ON(stripe_index >= bbio->num_stripes);
6007 dev = bbio->stripes[stripe_index].dev;
6008 if (dev->bdev) {
6009 if (bio_op(bio) == REQ_OP_WRITE)
6010 btrfs_dev_stat_inc(dev,
6011 BTRFS_DEV_STAT_WRITE_ERRS);
6012 else
6013 btrfs_dev_stat_inc(dev,
6014 BTRFS_DEV_STAT_READ_ERRS);
6015 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
6016 btrfs_dev_stat_inc(dev,
6017 BTRFS_DEV_STAT_FLUSH_ERRS);
6018 btrfs_dev_stat_print_on_error(dev);
6019 }
6020 }
6021 }
6022
6023 if (bio == bbio->orig_bio)
6024 is_orig_bio = 1;
6025
6026 btrfs_bio_counter_dec(bbio->fs_info);
6027
6028 if (atomic_dec_and_test(&bbio->stripes_pending)) {
6029 if (!is_orig_bio) {
6030 bio_put(bio);
6031 bio = bbio->orig_bio;
6032 }
6033
6034 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6035 /* only send an error to the higher layers if it is
6036 * beyond the tolerance of the btrfs bio
6037 */
6038 if (atomic_read(&bbio->error) > bbio->max_errors) {
6039 bio->bi_error = -EIO;
6040 } else {
6041 /*
6042 * this bio is actually up to date, we didn't
6043 * go over the max number of errors
6044 */
6045 bio->bi_error = 0;
6046 }
6047
6048 btrfs_end_bbio(bbio, bio);
6049 } else if (!is_orig_bio) {
6050 bio_put(bio);
6051 }
6052 }
6053
6054 /*
6055 * see run_scheduled_bios for a description of why bios are collected for
6056 * async submit.
6057 *
6058 * This will add one bio to the pending list for a device and make sure
6059 * the work struct is scheduled.
6060 */
6061 static noinline void btrfs_schedule_bio(struct btrfs_root *root,
6062 struct btrfs_device *device,
6063 struct bio *bio)
6064 {
6065 int should_queue = 1;
6066 struct btrfs_pending_bios *pending_bios;
6067
6068 if (device->missing || !device->bdev) {
6069 bio_io_error(bio);
6070 return;
6071 }
6072
6073 /* don't bother with additional async steps for reads, right now */
6074 if (bio_op(bio) == REQ_OP_READ) {
6075 bio_get(bio);
6076 btrfsic_submit_bio(bio);
6077 bio_put(bio);
6078 return;
6079 }
6080
6081 /*
6082 * nr_async_bios allows us to reliably return congestion to the
6083 * higher layers. Otherwise, the async bio makes it appear we have
6084 * made progress against dirty pages when we've really just put it
6085 * on a queue for later
6086 */
6087 atomic_inc(&root->fs_info->nr_async_bios);
6088 WARN_ON(bio->bi_next);
6089 bio->bi_next = NULL;
6090
6091 spin_lock(&device->io_lock);
6092 if (bio->bi_rw & REQ_SYNC)
6093 pending_bios = &device->pending_sync_bios;
6094 else
6095 pending_bios = &device->pending_bios;
6096
6097 if (pending_bios->tail)
6098 pending_bios->tail->bi_next = bio;
6099
6100 pending_bios->tail = bio;
6101 if (!pending_bios->head)
6102 pending_bios->head = bio;
6103 if (device->running_pending)
6104 should_queue = 0;
6105
6106 spin_unlock(&device->io_lock);
6107
6108 if (should_queue)
6109 btrfs_queue_work(root->fs_info->submit_workers,
6110 &device->work);
6111 }
6112
6113 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
6114 struct bio *bio, u64 physical, int dev_nr,
6115 int async)
6116 {
6117 struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
6118
6119 bio->bi_private = bbio;
6120 btrfs_io_bio(bio)->stripe_index = dev_nr;
6121 bio->bi_end_io = btrfs_end_bio;
6122 bio->bi_iter.bi_sector = physical >> 9;
6123 #ifdef DEBUG
6124 {
6125 struct rcu_string *name;
6126
6127 rcu_read_lock();
6128 name = rcu_dereference(dev->name);
6129 pr_debug("btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu "
6130 "(%s id %llu), size=%u\n", bio_op(bio), bio->bi_rw,
6131 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
6132 name->str, dev->devid, bio->bi_iter.bi_size);
6133 rcu_read_unlock();
6134 }
6135 #endif
6136 bio->bi_bdev = dev->bdev;
6137
6138 btrfs_bio_counter_inc_noblocked(root->fs_info);
6139
6140 if (async)
6141 btrfs_schedule_bio(root, dev, bio);
6142 else
6143 btrfsic_submit_bio(bio);
6144 }
6145
6146 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6147 {
6148 atomic_inc(&bbio->error);
6149 if (atomic_dec_and_test(&bbio->stripes_pending)) {
6150 /* Should be the original bio. */
6151 WARN_ON(bio != bbio->orig_bio);
6152
6153 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6154 bio->bi_iter.bi_sector = logical >> 9;
6155 bio->bi_error = -EIO;
6156 btrfs_end_bbio(bbio, bio);
6157 }
6158 }
6159
6160 int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
6161 int mirror_num, int async_submit)
6162 {
6163 struct btrfs_device *dev;
6164 struct bio *first_bio = bio;
6165 u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6166 u64 length = 0;
6167 u64 map_length;
6168 int ret;
6169 int dev_nr;
6170 int total_devs;
6171 struct btrfs_bio *bbio = NULL;
6172
6173 length = bio->bi_iter.bi_size;
6174 map_length = length;
6175
6176 btrfs_bio_counter_inc_blocked(root->fs_info);
6177 ret = __btrfs_map_block(root->fs_info, bio_op(bio), logical,
6178 &map_length, &bbio, mirror_num, 1);
6179 if (ret) {
6180 btrfs_bio_counter_dec(root->fs_info);
6181 return ret;
6182 }
6183
6184 total_devs = bbio->num_stripes;
6185 bbio->orig_bio = first_bio;
6186 bbio->private = first_bio->bi_private;
6187 bbio->end_io = first_bio->bi_end_io;
6188 bbio->fs_info = root->fs_info;
6189 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6190
6191 if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6192 ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
6193 /* In this case, map_length has been set to the length of
6194 a single stripe; not the whole write */
6195 if (bio_op(bio) == REQ_OP_WRITE) {
6196 ret = raid56_parity_write(root, bio, bbio, map_length);
6197 } else {
6198 ret = raid56_parity_recover(root, bio, bbio, map_length,
6199 mirror_num, 1);
6200 }
6201
6202 btrfs_bio_counter_dec(root->fs_info);
6203 return ret;
6204 }
6205
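/*
 * For the non-RAID56 profiles the upper layers split bios at stripe
 * boundaries before submission (via the merge hooks), so
 * __btrfs_map_block() is not expected to return a mapping shorter than
 * the bio; if it does, the mapping and the bio disagree and nothing sane
 * can be submitted.
 */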
6206 if (map_length < length) {
6207 btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
6208 logical, length, map_length);
6209 BUG();
6210 }
6211
6212 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6213 dev = bbio->stripes[dev_nr].dev;
6214 if (!dev || !dev->bdev ||
6215 (bio_op(bio) == REQ_OP_WRITE && !dev->writeable)) {
6216 bbio_error(bbio, first_bio, logical);
6217 continue;
6218 }
6219
6220 if (dev_nr < total_devs - 1) {
6221 bio = btrfs_bio_clone(first_bio, GFP_NOFS);
6222 BUG_ON(!bio); /* -ENOMEM */
6223 } else
6224 bio = first_bio;
6225
6226 submit_stripe_bio(root, bbio, bio,
6227 bbio->stripes[dev_nr].physical, dev_nr,
6228 async_submit);
6229 }
6230 btrfs_bio_counter_dec(root->fs_info);
6231 return 0;
6232 }
6233
6234 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
6235 u8 *uuid, u8 *fsid)
6236 {
6237 struct btrfs_device *device;
6238 struct btrfs_fs_devices *cur_devices;
6239
6240 cur_devices = fs_info->fs_devices;
6241 while (cur_devices) {
6242 if (!fsid ||
6243 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
6244 device = __find_device(&cur_devices->devices,
6245 devid, uuid);
6246 if (device)
6247 return device;
6248 }
6249 cur_devices = cur_devices->seed;
6250 }
6251 return NULL;
6252 }
6253
6254 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
6255 struct btrfs_fs_devices *fs_devices,
6256 u64 devid, u8 *dev_uuid)
6257 {
6258 struct btrfs_device *device;
6259
6260 device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6261 if (IS_ERR(device))
6262 return NULL;
6263
6264 list_add(&device->dev_list, &fs_devices->devices);
6265 device->fs_devices = fs_devices;
6266 fs_devices->num_devices++;
6267
6268 device->missing = 1;
6269 fs_devices->missing_devices++;
6270
6271 return device;
6272 }
6273
6274 /**
6275 * btrfs_alloc_device - allocate struct btrfs_device
6276 * @fs_info: used only for generating a new devid, can be NULL if
6277 * devid is provided (i.e. @devid != NULL).
6278 * @devid: a pointer to devid for this device. If NULL a new devid
6279 * is generated.
6280 * @uuid: a pointer to UUID for this device. If NULL a new UUID
6281 * is generated.
6282 *
6283 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6284 * on error. Returned struct is not linked onto any lists and can be
6285 * destroyed with kfree() right away.
6286 */
6287 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6288 const u64 *devid,
6289 const u8 *uuid)
6290 {
6291 struct btrfs_device *dev;
6292 u64 tmp;
6293
6294 if (WARN_ON(!devid && !fs_info))
6295 return ERR_PTR(-EINVAL);
6296
6297 dev = __alloc_device();
6298 if (IS_ERR(dev))
6299 return dev;
6300
6301 if (devid)
6302 tmp = *devid;
6303 else {
6304 int ret;
6305
6306 ret = find_next_devid(fs_info, &tmp);
6307 if (ret) {
6308 kfree(dev);
6309 return ERR_PTR(ret);
6310 }
6311 }
6312 dev->devid = tmp;
6313
6314 if (uuid)
6315 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6316 else
6317 generate_random_uuid(dev->uuid);
6318
6319 btrfs_init_work(&dev->work, btrfs_submit_helper,
6320 pending_bios_fn, NULL, NULL);
6321
6322 return dev;
6323 }
6324
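/*
 * Usage sketch (illustrative; error handling only hinted at): callers
 * such as add_missing_dev() above allocate a record for a known
 * devid/uuid pair with
 *
 *	dev = btrfs_alloc_device(NULL, &devid, dev_uuid);
 *	if (IS_ERR(dev))
 *		return NULL;
 *
 * and only then link it onto a fs_devices list; until that happens the
 * struct can simply be kfree()d again, as noted above.
 */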
6325 /* Return -EIO if any error, otherwise return 0. */
6326 static int btrfs_check_chunk_valid(struct btrfs_root *root,
6327 struct extent_buffer *leaf,
6328 struct btrfs_chunk *chunk, u64 logical)
6329 {
6330 u64 length;
6331 u64 stripe_len;
6332 u16 num_stripes;
6333 u16 sub_stripes;
6334 u64 type;
6335
6336 length = btrfs_chunk_length(leaf, chunk);
6337 stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6338 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6339 sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6340 type = btrfs_chunk_type(leaf, chunk);
6341
6342 if (!num_stripes) {
6343 btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
6344 num_stripes);
6345 return -EIO;
6346 }
6347 if (!IS_ALIGNED(logical, root->sectorsize)) {
6348 btrfs_err(root->fs_info,
6349 "invalid chunk logical %llu", logical);
6350 return -EIO;
6351 }
6352 if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) {
6353 btrfs_err(root->fs_info, "invalid chunk sectorsize %u",
6354 btrfs_chunk_sector_size(leaf, chunk));
6355 return -EIO;
6356 }
6357 if (!length || !IS_ALIGNED(length, root->sectorsize)) {
6358 btrfs_err(root->fs_info,
6359 "invalid chunk length %llu", length);
6360 return -EIO;
6361 }
6362 if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
6363 btrfs_err(root->fs_info, "invalid chunk stripe length: %llu",
6364 stripe_len);
6365 return -EIO;
6366 }
6367 if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6368 type) {
6369 btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
6370 ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
6371 BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6372 btrfs_chunk_type(leaf, chunk));
6373 return -EIO;
6374 }
6375 if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
6376 (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
6377 (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
6378 (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
6379 (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
6380 ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
6381 num_stripes != 1)) {
6382 btrfs_err(root->fs_info,
6383 "invalid num_stripes:sub_stripes %u:%u for profile %llu",
6384 num_stripes, sub_stripes,
6385 type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
6386 return -EIO;
6387 }
6388
6389 return 0;
6390 }
6391
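/*
 * The per-profile constraints enforced above, gathered in one place
 * (derived directly from the checks, not from a separate spec):
 *
 *	RAID10:	sub_stripes == 2
 *	RAID1:	num_stripes >= 1
 *	RAID5:	num_stripes >= 2
 *	RAID6:	num_stripes >= 3
 *	DUP:	num_stripes <= 2
 *	single:	num_stripes == 1 (no profile bits set)
 */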
6392 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
6393 struct extent_buffer *leaf,
6394 struct btrfs_chunk *chunk)
6395 {
6396 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
6397 struct map_lookup *map;
6398 struct extent_map *em;
6399 u64 logical;
6400 u64 length;
6401 u64 stripe_len;
6402 u64 devid;
6403 u8 uuid[BTRFS_UUID_SIZE];
6404 int num_stripes;
6405 int ret;
6406 int i;
6407
6408 logical = key->offset;
6409 length = btrfs_chunk_length(leaf, chunk);
6410 stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6411 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6412
6413 ret = btrfs_check_chunk_valid(root, leaf, chunk, logical);
6414 if (ret)
6415 return ret;
6416
6417 read_lock(&map_tree->map_tree.lock);
6418 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
6419 read_unlock(&map_tree->map_tree.lock);
6420
6421 /* already mapped? */
6422 if (em && em->start <= logical && em->start + em->len > logical) {
6423 free_extent_map(em);
6424 return 0;
6425 } else if (em) {
6426 free_extent_map(em);
6427 }
6428
6429 em = alloc_extent_map();
6430 if (!em)
6431 return -ENOMEM;
6432 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6433 if (!map) {
6434 free_extent_map(em);
6435 return -ENOMEM;
6436 }
6437
6438 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6439 em->map_lookup = map;
6440 em->start = logical;
6441 em->len = length;
6442 em->orig_start = 0;
6443 em->block_start = 0;
6444 em->block_len = em->len;
6445
6446 map->num_stripes = num_stripes;
6447 map->io_width = btrfs_chunk_io_width(leaf, chunk);
6448 map->io_align = btrfs_chunk_io_align(leaf, chunk);
6449 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
6450 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6451 map->type = btrfs_chunk_type(leaf, chunk);
6452 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6453 for (i = 0; i < num_stripes; i++) {
6454 map->stripes[i].physical =
6455 btrfs_stripe_offset_nr(leaf, chunk, i);
6456 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6457 read_extent_buffer(leaf, uuid, (unsigned long)
6458 btrfs_stripe_dev_uuid_nr(chunk, i),
6459 BTRFS_UUID_SIZE);
6460 map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
6461 uuid, NULL);
6462 if (!map->stripes[i].dev &&
6463 !btrfs_test_opt(root->fs_info, DEGRADED)) {
6464 free_extent_map(em);
6465 return -EIO;
6466 }
6467 if (!map->stripes[i].dev) {
6468 map->stripes[i].dev =
6469 add_missing_dev(root, root->fs_info->fs_devices,
6470 devid, uuid);
6471 if (!map->stripes[i].dev) {
6472 free_extent_map(em);
6473 return -EIO;
6474 }
6475 btrfs_warn(root->fs_info, "devid %llu uuid %pU is missing",
6476 devid, uuid);
6477 }
6478 map->stripes[i].dev->in_fs_metadata = 1;
6479 }
6480
6481 write_lock(&map_tree->map_tree.lock);
6482 ret = add_extent_mapping(&map_tree->map_tree, em, 0);
6483 write_unlock(&map_tree->map_tree.lock);
6484 BUG_ON(ret); /* Tree corruption */
6485 free_extent_map(em);
6486
6487 return 0;
6488 }
6489
6490 static void fill_device_from_item(struct extent_buffer *leaf,
6491 struct btrfs_dev_item *dev_item,
6492 struct btrfs_device *device)
6493 {
6494 unsigned long ptr;
6495
6496 device->devid = btrfs_device_id(leaf, dev_item);
6497 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6498 device->total_bytes = device->disk_total_bytes;
6499 device->commit_total_bytes = device->disk_total_bytes;
6500 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6501 device->commit_bytes_used = device->bytes_used;
6502 device->type = btrfs_device_type(leaf, dev_item);
6503 device->io_align = btrfs_device_io_align(leaf, dev_item);
6504 device->io_width = btrfs_device_io_width(leaf, dev_item);
6505 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6506 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6507 device->is_tgtdev_for_dev_replace = 0;
6508
6509 ptr = btrfs_device_uuid(dev_item);
6510 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6511 }
6512
6513 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
6514 u8 *fsid)
6515 {
6516 struct btrfs_fs_devices *fs_devices;
6517 int ret;
6518
6519 BUG_ON(!mutex_is_locked(&uuid_mutex));
6520
6521 fs_devices = root->fs_info->fs_devices->seed;
6522 while (fs_devices) {
6523 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE))
6524 return fs_devices;
6525
6526 fs_devices = fs_devices->seed;
6527 }
6528
6529 fs_devices = find_fsid(fsid);
6530 if (!fs_devices) {
6531 if (!btrfs_test_opt(root->fs_info, DEGRADED))
6532 return ERR_PTR(-ENOENT);
6533
6534 fs_devices = alloc_fs_devices(fsid);
6535 if (IS_ERR(fs_devices))
6536 return fs_devices;
6537
6538 fs_devices->seeding = 1;
6539 fs_devices->opened = 1;
6540 return fs_devices;
6541 }
6542
6543 fs_devices = clone_fs_devices(fs_devices);
6544 if (IS_ERR(fs_devices))
6545 return fs_devices;
6546
6547 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
6548 root->fs_info->bdev_holder);
6549 if (ret) {
6550 free_fs_devices(fs_devices);
6551 fs_devices = ERR_PTR(ret);
6552 goto out;
6553 }
6554
6555 if (!fs_devices->seeding) {
6556 __btrfs_close_devices(fs_devices);
6557 free_fs_devices(fs_devices);
6558 fs_devices = ERR_PTR(-EINVAL);
6559 goto out;
6560 }
6561
6562 fs_devices->seed = root->fs_info->fs_devices->seed;
6563 root->fs_info->fs_devices->seed = fs_devices;
6564 out:
6565 return fs_devices;
6566 }
6567
6568 static int read_one_dev(struct btrfs_root *root,
6569 struct extent_buffer *leaf,
6570 struct btrfs_dev_item *dev_item)
6571 {
6572 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6573 struct btrfs_device *device;
6574 u64 devid;
6575 int ret;
6576 u8 fs_uuid[BTRFS_UUID_SIZE];
6577 u8 dev_uuid[BTRFS_UUID_SIZE];
6578
6579 devid = btrfs_device_id(leaf, dev_item);
6580 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6581 BTRFS_UUID_SIZE);
6582 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6583 BTRFS_UUID_SIZE);
6584
6585 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
6586 fs_devices = open_seed_devices(root, fs_uuid);
6587 if (IS_ERR(fs_devices))
6588 return PTR_ERR(fs_devices);
6589 }
6590
6591 device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
6592 if (!device) {
6593 if (!btrfs_test_opt(root->fs_info, DEGRADED))
6594 return -EIO;
6595
6596 device = add_missing_dev(root, fs_devices, devid, dev_uuid);
6597 if (!device)
6598 return -ENOMEM;
6599 btrfs_warn(root->fs_info, "devid %llu uuid %pU missing",
6600 devid, dev_uuid);
6601 } else {
6602 if (!device->bdev && !btrfs_test_opt(root->fs_info, DEGRADED))
6603 return -EIO;
6604
6605 if (!device->bdev && !device->missing) {
6606 /*
6607 * this happens when a device that was properly set up
6608 * in the device info lists suddenly goes bad.
6609 * device->bdev is NULL, and so we have to set
6610 * device->missing to one here
6611 */
6612 device->fs_devices->missing_devices++;
6613 device->missing = 1;
6614 }
6615
6616 /* Move the device to its own fs_devices */
6617 if (device->fs_devices != fs_devices) {
6618 ASSERT(device->missing);
6619
6620 list_move(&device->dev_list, &fs_devices->devices);
6621 device->fs_devices->num_devices--;
6622 fs_devices->num_devices++;
6623
6624 device->fs_devices->missing_devices--;
6625 fs_devices->missing_devices++;
6626
6627 device->fs_devices = fs_devices;
6628 }
6629 }
6630
6631 if (device->fs_devices != root->fs_info->fs_devices) {
6632 BUG_ON(device->writeable);
6633 if (device->generation !=
6634 btrfs_device_generation(leaf, dev_item))
6635 return -EINVAL;
6636 }
6637
6638 fill_device_from_item(leaf, dev_item, device);
6639 device->in_fs_metadata = 1;
6640 if (device->writeable && !device->is_tgtdev_for_dev_replace) {
6641 device->fs_devices->total_rw_bytes += device->total_bytes;
6642 spin_lock(&root->fs_info->free_chunk_lock);
6643 root->fs_info->free_chunk_space += device->total_bytes -
6644 device->bytes_used;
6645 spin_unlock(&root->fs_info->free_chunk_lock);
6646 }
6647 ret = 0;
6648 return ret;
6649 }
6650
6651 int btrfs_read_sys_array(struct btrfs_root *root)
6652 {
6653 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
6654 struct extent_buffer *sb;
6655 struct btrfs_disk_key *disk_key;
6656 struct btrfs_chunk *chunk;
6657 u8 *array_ptr;
6658 unsigned long sb_array_offset;
6659 int ret = 0;
6660 u32 num_stripes;
6661 u32 array_size;
6662 u32 len = 0;
6663 u32 cur_offset;
6664 u64 type;
6665 struct btrfs_key key;
6666
6667 ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
6668 /*
6669 * This will create an extent buffer of nodesize; the superblock size is
6670 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
6671 * overallocate, but we can keep it as-is; only the first page is used.
6672 */
6673 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
6674 if (IS_ERR(sb))
6675 return PTR_ERR(sb);
6676 set_extent_buffer_uptodate(sb);
6677 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6678 /*
6679 * The sb extent buffer is artificial and just used to read the system array.
6680 * The set_extent_buffer_uptodate() call does not properly mark all its
6681 * pages up-to-date when the page is larger: the extent does not cover the
6682 * whole page and consequently check_page_uptodate does not find all
6683 * the page's extents up-to-date (the hole beyond sb);
6684 * write_extent_buffer then triggers a WARN_ON.
6685 *
6686 * Regular short extents go through the mark_extent_buffer_dirty/writeback
6687 * cycle, but sb spans only this function. Add an explicit SetPageUptodate
6688 * call to silence the warning, e.g. on PowerPC 64.
6689 */
6690 if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
6691 SetPageUptodate(sb->pages[0]);
6692
6693 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
6694 array_size = btrfs_super_sys_array_size(super_copy);
6695
6696 array_ptr = super_copy->sys_chunk_array;
6697 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
6698 cur_offset = 0;
6699
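/*
 * sys_chunk_array is a packed sequence of pairs:
 *
 *	(struct btrfs_disk_key)(struct btrfs_chunk incl. stripes) ...
 *
 * The loop below advances three cursors in lockstep: array_ptr (into the
 * private superblock copy), sb_array_offset (into the sb extent buffer)
 * and cur_offset (bytes consumed).  Each length is validated against
 * array_size before it is read.
 */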
6700 while (cur_offset < array_size) {
6701 disk_key = (struct btrfs_disk_key *)array_ptr;
6702 len = sizeof(*disk_key);
6703 if (cur_offset + len > array_size)
6704 goto out_short_read;
6705
6706 btrfs_disk_key_to_cpu(&key, disk_key);
6707
6708 array_ptr += len;
6709 sb_array_offset += len;
6710 cur_offset += len;
6711
6712 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
6713 chunk = (struct btrfs_chunk *)sb_array_offset;
6714 /*
6715 * At least one btrfs_chunk with one stripe must be
6716 * present; the exact stripe count check comes afterwards
6717 */
6718 len = btrfs_chunk_item_size(1);
6719 if (cur_offset + len > array_size)
6720 goto out_short_read;
6721
6722 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
6723 if (!num_stripes) {
6724 printk(KERN_ERR
6725 "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
6726 num_stripes, cur_offset);
6727 ret = -EIO;
6728 break;
6729 }
6730
6731 type = btrfs_chunk_type(sb, chunk);
6732 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
6733 btrfs_err(root->fs_info,
6734 "invalid chunk type %llu in sys_array at offset %u",
6735 type, cur_offset);
6736 ret = -EIO;
6737 break;
6738 }
6739
6740 len = btrfs_chunk_item_size(num_stripes);
6741 if (cur_offset + len > array_size)
6742 goto out_short_read;
6743
6744 ret = read_one_chunk(root, &key, sb, chunk);
6745 if (ret)
6746 break;
6747 } else {
6748 printk(KERN_ERR
6749 "BTRFS: unexpected item type %u in sys_array at offset %u\n",
6750 (u32)key.type, cur_offset);
6751 ret = -EIO;
6752 break;
6753 }
6754 array_ptr += len;
6755 sb_array_offset += len;
6756 cur_offset += len;
6757 }
6758 clear_extent_buffer_uptodate(sb);
6759 free_extent_buffer_stale(sb);
6760 return ret;
6761
6762 out_short_read:
6763 printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
6764 len, cur_offset);
6765 clear_extent_buffer_uptodate(sb);
6766 free_extent_buffer_stale(sb);
6767 return -EIO;
6768 }
6769
6770 int btrfs_read_chunk_tree(struct btrfs_root *root)
6771 {
6772 struct btrfs_path *path;
6773 struct extent_buffer *leaf;
6774 struct btrfs_key key;
6775 struct btrfs_key found_key;
6776 int ret;
6777 int slot;
6778 u64 total_dev = 0;
6779
6780 root = root->fs_info->chunk_root;
6781
6782 path = btrfs_alloc_path();
6783 if (!path)
6784 return -ENOMEM;
6785
6786 mutex_lock(&uuid_mutex);
6787 lock_chunks(root);
6788
6789 /*
6790 * Read all device items, and then all the chunk items. All
6791 * device items are found before any chunk item (their object id
6792 * is smaller than the lowest possible object id for a chunk
6793 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
6794 */
6795 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
6796 key.offset = 0;
6797 key.type = 0;
6798 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6799 if (ret < 0)
6800 goto error;
6801 while (1) {
6802 leaf = path->nodes[0];
6803 slot = path->slots[0];
6804 if (slot >= btrfs_header_nritems(leaf)) {
6805 ret = btrfs_next_leaf(root, path);
6806 if (ret == 0)
6807 continue;
6808 if (ret < 0)
6809 goto error;
6810 break;
6811 }
6812 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6813 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
6814 struct btrfs_dev_item *dev_item;
6815 dev_item = btrfs_item_ptr(leaf, slot,
6816 struct btrfs_dev_item);
6817 ret = read_one_dev(root, leaf, dev_item);
6818 if (ret)
6819 goto error;
6820 total_dev++;
6821 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
6822 struct btrfs_chunk *chunk;
6823 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
6824 ret = read_one_chunk(root, &found_key, leaf, chunk);
6825 if (ret)
6826 goto error;
6827 }
6828 path->slots[0]++;
6829 }
6830
6831 /*
6832 * After loading chunk tree, we've got all device information,
6833 * do another round of validation checks.
6834 */
6835 if (total_dev != root->fs_info->fs_devices->total_devices) {
6836 btrfs_err(root->fs_info,
6837 "super_num_devices %llu mismatch with num_devices %llu found here",
6838 btrfs_super_num_devices(root->fs_info->super_copy),
6839 total_dev);
6840 ret = -EINVAL;
6841 goto error;
6842 }
6843 if (btrfs_super_total_bytes(root->fs_info->super_copy) <
6844 root->fs_info->fs_devices->total_rw_bytes) {
6845 btrfs_err(root->fs_info,
6846 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
6847 btrfs_super_total_bytes(root->fs_info->super_copy),
6848 root->fs_info->fs_devices->total_rw_bytes);
6849 ret = -EINVAL;
6850 goto error;
6851 }
6852 ret = 0;
6853 error:
6854 unlock_chunks(root);
6855 mutex_unlock(&uuid_mutex);
6856
6857 btrfs_free_path(path);
6858 return ret;
6859 }
6860
6861 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
6862 {
6863 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6864 struct btrfs_device *device;
6865
6866 while (fs_devices) {
6867 mutex_lock(&fs_devices->device_list_mutex);
6868 list_for_each_entry(device, &fs_devices->devices, dev_list)
6869 device->dev_root = fs_info->dev_root;
6870 mutex_unlock(&fs_devices->device_list_mutex);
6871
6872 fs_devices = fs_devices->seed;
6873 }
6874 }
6875
6876 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
6877 {
6878 int i;
6879
6880 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6881 btrfs_dev_stat_reset(dev, i);
6882 }
6883
6884 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
6885 {
6886 struct btrfs_key key;
6887 struct btrfs_key found_key;
6888 struct btrfs_root *dev_root = fs_info->dev_root;
6889 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6890 struct extent_buffer *eb;
6891 int slot;
6892 int ret = 0;
6893 struct btrfs_device *device;
6894 struct btrfs_path *path = NULL;
6895 int i;
6896
6897 path = btrfs_alloc_path();
6898 if (!path) {
6899 ret = -ENOMEM;
6900 goto out;
6901 }
6902
6903 mutex_lock(&fs_devices->device_list_mutex);
6904 list_for_each_entry(device, &fs_devices->devices, dev_list) {
6905 int item_size;
6906 struct btrfs_dev_stats_item *ptr;
6907
6908 key.objectid = BTRFS_DEV_STATS_OBJECTID;
6909 key.type = BTRFS_PERSISTENT_ITEM_KEY;
6910 key.offset = device->devid;
6911 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
6912 if (ret) {
6913 __btrfs_reset_dev_stats(device);
6914 device->dev_stats_valid = 1;
6915 btrfs_release_path(path);
6916 continue;
6917 }
6918 slot = path->slots[0];
6919 eb = path->nodes[0];
6920 btrfs_item_key_to_cpu(eb, &found_key, slot);
6921 item_size = btrfs_item_size_nr(eb, slot);
6922
6923 ptr = btrfs_item_ptr(eb, slot,
6924 struct btrfs_dev_stats_item);
6925
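/*
 * Items written by older kernels may be shorter than sizeof(*ptr) if
 * fewer stat values existed back then; the size check below reads only
 * the values actually present and zeroes the rest instead of reading
 * past the end of the item.
 */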
6926 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6927 if (item_size >= (1 + i) * sizeof(__le64))
6928 btrfs_dev_stat_set(device, i,
6929 btrfs_dev_stats_value(eb, ptr, i));
6930 else
6931 btrfs_dev_stat_reset(device, i);
6932 }
6933
6934 device->dev_stats_valid = 1;
6935 btrfs_dev_stat_print_on_load(device);
6936 btrfs_release_path(path);
6937 }
6938 mutex_unlock(&fs_devices->device_list_mutex);
6939
6940 out:
6941 btrfs_free_path(path);
6942 return ret < 0 ? ret : 0;
6943 }
6944
6945 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
6946 struct btrfs_root *dev_root,
6947 struct btrfs_device *device)
6948 {
6949 struct btrfs_path *path;
6950 struct btrfs_key key;
6951 struct extent_buffer *eb;
6952 struct btrfs_dev_stats_item *ptr;
6953 int ret;
6954 int i;
6955
6956 key.objectid = BTRFS_DEV_STATS_OBJECTID;
6957 key.type = BTRFS_PERSISTENT_ITEM_KEY;
6958 key.offset = device->devid;
6959
6960 path = btrfs_alloc_path();
6961 if (!path)
	return -ENOMEM;
6962 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
6963 if (ret < 0) {
6964 btrfs_warn_in_rcu(dev_root->fs_info,
6965 "error %d while searching for dev_stats item for device %s",
6966 ret, rcu_str_deref(device->name));
6967 goto out;
6968 }
6969
6970 if (ret == 0 &&
6971 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
6972 /* need to delete old one and insert a new one */
6973 ret = btrfs_del_item(trans, dev_root, path);
6974 if (ret != 0) {
6975 btrfs_warn_in_rcu(dev_root->fs_info,
6976 "delete too small dev_stats item for device %s failed %d",
6977 rcu_str_deref(device->name), ret);
6978 goto out;
6979 }
6980 ret = 1;
6981 }
6982
6983 if (ret == 1) {
6984 /* need to insert a new item */
6985 btrfs_release_path(path);
6986 ret = btrfs_insert_empty_item(trans, dev_root, path,
6987 &key, sizeof(*ptr));
6988 if (ret < 0) {
6989 btrfs_warn_in_rcu(dev_root->fs_info,
6990 "insert dev_stats item for device %s failed %d",
6991 rcu_str_deref(device->name), ret);
6992 goto out;
6993 }
6994 }
6995
6996 eb = path->nodes[0];
6997 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
6998 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6999 btrfs_set_dev_stats_value(eb, ptr, i,
7000 btrfs_dev_stat_read(device, i));
7001 btrfs_mark_buffer_dirty(eb);
7002
7003 out:
7004 btrfs_free_path(path);
7005 return ret;
7006 }
7007
7008 /*
7009 * called from commit_transaction. Writes all changed device stats to disk.
7010 */
7011 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
7012 struct btrfs_fs_info *fs_info)
7013 {
7014 struct btrfs_root *dev_root = fs_info->dev_root;
7015 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7016 struct btrfs_device *device;
7017 int stats_cnt;
7018 int ret = 0;
7019
7020 mutex_lock(&fs_devices->device_list_mutex);
7021 list_for_each_entry(device, &fs_devices->devices, dev_list) {
7022 if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
7023 continue;
7024
7025 stats_cnt = atomic_read(&device->dev_stats_ccnt);
7026 ret = update_dev_stat_item(trans, dev_root, device);
7027 if (!ret)
7028 atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7029 }
7030 mutex_unlock(&fs_devices->device_list_mutex);
7031
7032 return ret;
7033 }
7034
7035 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7036 {
7037 btrfs_dev_stat_inc(dev, index);
7038 btrfs_dev_stat_print_on_error(dev);
7039 }
7040
7041 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7042 {
7043 if (!dev->dev_stats_valid)
7044 return;
7045 btrfs_err_rl_in_rcu(dev->dev_root->fs_info,
7046 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7047 rcu_str_deref(dev->name),
7048 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7049 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7050 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7051 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7052 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7053 }
7054
7055 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7056 {
7057 int i;
7058
7059 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7060 if (btrfs_dev_stat_read(dev, i) != 0)
7061 break;
7062 if (i == BTRFS_DEV_STAT_VALUES_MAX)
7063 return; /* all values == 0, suppress message */
7064
7065 btrfs_info_in_rcu(dev->dev_root->fs_info,
7066 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7067 rcu_str_deref(dev->name),
7068 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7069 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7070 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7071 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7072 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7073 }
7074
7075 int btrfs_get_dev_stats(struct btrfs_root *root,
7076 struct btrfs_ioctl_get_dev_stats *stats)
7077 {
7078 struct btrfs_device *dev;
7079 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7080 int i;
7081
7082 mutex_lock(&fs_devices->device_list_mutex);
7083 dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
7084 mutex_unlock(&fs_devices->device_list_mutex);
7085
7086 if (!dev) {
7087 btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
7088 return -ENODEV;
7089 } else if (!dev->dev_stats_valid) {
7090 btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
7091 return -ENODEV;
7092 } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7093 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7094 if (stats->nr_items > i)
7095 stats->values[i] =
7096 btrfs_dev_stat_read_and_reset(dev, i);
7097 else
7098 btrfs_dev_stat_reset(dev, i);
7099 }
7100 } else {
7101 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7102 if (stats->nr_items > i)
7103 stats->values[i] = btrfs_dev_stat_read(dev, i);
7104 }
7105 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7106 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7107 return 0;
7108 }
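/*
 * Note on btrfs_get_dev_stats() above (derived from the code): with
 * BTRFS_DEV_STATS_RESET each returned counter is read and reset
 * individually; without the flag the counters are only read.  nr_items
 * is clamped to BTRFS_DEV_STAT_VALUES_MAX so that userspace can detect
 * how many values this kernel supports.
 */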
7109
7110 void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path)
7111 {
7112 struct buffer_head *bh;
7113 struct btrfs_super_block *disk_super;
7114 int copy_num;
7115
7116 if (!bdev)
7117 return;
7118
7119 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
7120 copy_num++) {
7121
7122 if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
7123 continue;
7124
7125 disk_super = (struct btrfs_super_block *)bh->b_data;
7126
7127 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
7128 set_buffer_dirty(bh);
7129 sync_dirty_buffer(bh);
7130 brelse(bh);
7131 }
7132
7133 /* Notify udev that device has changed */
7134 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
7135
7136 /* Update ctime/mtime for device path for libblkid */
7137 update_dev_time(device_path);
7138 }
7139
7140 /*
7141 * Update the size of all devices, which is used for writing out the
7142 * super blocks.
7143 */
7144 void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
7145 {
7146 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7147 struct btrfs_device *curr, *next;
7148
7149 if (list_empty(&fs_devices->resized_devices))
7150 return;
7151
7152 mutex_lock(&fs_devices->device_list_mutex);
7153 lock_chunks(fs_info->dev_root);
7154 list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
7155 resized_list) {
7156 list_del_init(&curr->resized_list);
7157 curr->commit_total_bytes = curr->disk_total_bytes;
7158 }
7159 unlock_chunks(fs_info->dev_root);
7160 mutex_unlock(&fs_devices->device_list_mutex);
7161 }
7162
7163 /* Must be invoked during the transaction commit */
7164 void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
7165 struct btrfs_transaction *transaction)
7166 {
7167 struct extent_map *em;
7168 struct map_lookup *map;
7169 struct btrfs_device *dev;
7170 int i;
7171
7172 if (list_empty(&transaction->pending_chunks))
7173 return;
7174
7175 /* In order to kick the device replace finish process */
7176 lock_chunks(root);
7177 list_for_each_entry(em, &transaction->pending_chunks, list) {
7178 map = em->map_lookup;
7179
7180 for (i = 0; i < map->num_stripes; i++) {
7181 dev = map->stripes[i].dev;
7182 dev->commit_bytes_used = dev->bytes_used;
7183 }
7184 }
7185 unlock_chunks(root);
7186 }
7187
7188 void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
7189 {
7190 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7191 while (fs_devices) {
7192 fs_devices->fs_info = fs_info;
7193 fs_devices = fs_devices->seed;
7194 }
7195 }
7196
7197 void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
7198 {
7199 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7200 while (fs_devices) {
7201 fs_devices->fs_info = NULL;
7202 fs_devices = fs_devices->seed;
7203 }
7204 }