Btrfs: find_next_devid: root -> fs_info
fs/btrfs/volumes.c (deliverable/linux.git)
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
    16  * Boston, MA 02111-1307, USA.
17 */
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/slab.h>
21 #include <linux/buffer_head.h>
22 #include <linux/blkdev.h>
23 #include <linux/random.h>
24 #include <linux/iocontext.h>
25 #include <linux/capability.h>
26 #include <linux/ratelimit.h>
27 #include <linux/kthread.h>
28 #include <linux/raid/pq.h>
29 #include <linux/semaphore.h>
30 #include <asm/div64.h>
31 #include "compat.h"
32 #include "ctree.h"
33 #include "extent_map.h"
34 #include "disk-io.h"
35 #include "transaction.h"
36 #include "print-tree.h"
37 #include "volumes.h"
38 #include "raid56.h"
39 #include "async-thread.h"
40 #include "check-integrity.h"
41 #include "rcu-string.h"
42 #include "math.h"
43 #include "dev-replace.h"
44
45 static int init_first_rw_device(struct btrfs_trans_handle *trans,
46 struct btrfs_root *root,
47 struct btrfs_device *device);
48 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
49 static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
50 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
51 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
52
53 static DEFINE_MUTEX(uuid_mutex);
54 static LIST_HEAD(fs_uuids);
55
56 static void lock_chunks(struct btrfs_root *root)
57 {
58 mutex_lock(&root->fs_info->chunk_mutex);
59 }
60
61 static void unlock_chunks(struct btrfs_root *root)
62 {
63 mutex_unlock(&root->fs_info->chunk_mutex);
64 }
65
66 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
67 {
68 struct btrfs_device *device;
69 WARN_ON(fs_devices->opened);
70 while (!list_empty(&fs_devices->devices)) {
71 device = list_entry(fs_devices->devices.next,
72 struct btrfs_device, dev_list);
73 list_del(&device->dev_list);
74 rcu_string_free(device->name);
75 kfree(device);
76 }
77 kfree(fs_devices);
78 }
79
80 static void btrfs_kobject_uevent(struct block_device *bdev,
81 enum kobject_action action)
82 {
83 int ret;
84
85 ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
86 if (ret)
87 pr_warn("Sending event '%d' to kobject: '%s' (%p): failed\n",
88 action,
89 kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
90 &disk_to_dev(bdev->bd_disk)->kobj);
91 }
92
93 void btrfs_cleanup_fs_uuids(void)
94 {
95 struct btrfs_fs_devices *fs_devices;
96
97 while (!list_empty(&fs_uuids)) {
98 fs_devices = list_entry(fs_uuids.next,
99 struct btrfs_fs_devices, list);
100 list_del(&fs_devices->list);
101 free_fs_devices(fs_devices);
102 }
103 }
104
105 static noinline struct btrfs_device *__find_device(struct list_head *head,
106 u64 devid, u8 *uuid)
107 {
108 struct btrfs_device *dev;
109
110 list_for_each_entry(dev, head, dev_list) {
111 if (dev->devid == devid &&
112 (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
113 return dev;
114 }
115 }
116 return NULL;
117 }
118
119 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
120 {
121 struct btrfs_fs_devices *fs_devices;
122
123 list_for_each_entry(fs_devices, &fs_uuids, list) {
124 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
125 return fs_devices;
126 }
127 return NULL;
128 }
129
130 static int
131 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
132 int flush, struct block_device **bdev,
133 struct buffer_head **bh)
134 {
135 int ret;
136
137 *bdev = blkdev_get_by_path(device_path, flags, holder);
138
139 if (IS_ERR(*bdev)) {
140 ret = PTR_ERR(*bdev);
141 printk(KERN_INFO "btrfs: open %s failed\n", device_path);
142 goto error;
143 }
144
145 if (flush)
146 filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
147 ret = set_blocksize(*bdev, 4096);
148 if (ret) {
149 blkdev_put(*bdev, flags);
150 goto error;
151 }
152 invalidate_bdev(*bdev);
153 *bh = btrfs_read_dev_super(*bdev);
154 if (!*bh) {
155 ret = -EINVAL;
156 blkdev_put(*bdev, flags);
157 goto error;
158 }
159
160 return 0;
161
162 error:
163 *bdev = NULL;
164 *bh = NULL;
165 return ret;
166 }
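/*
 * Illustration (not part of the original file): a minimal, hypothetical
 * caller of btrfs_get_bdev_and_sb() above, showing the expected
 * open/read/release pairing. The flags mirror the read-only probes made
 * later in this file; "example_probe" itself is invented for this sketch.
 */
static int example_probe(const char *path, void *holder)
{
	struct block_device *bdev;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	int ret;

	/* flush=1 writes back the pagecache before reading the super */
	ret = btrfs_get_bdev_and_sb(path, FMODE_READ | FMODE_EXCL,
				    holder, 1, &bdev, &bh);
	if (ret)
		return ret;

	disk_super = (struct btrfs_super_block *)bh->b_data;
	pr_info("btrfs: %s carries devid %llu\n", path,
		(unsigned long long)btrfs_stack_device_id(&disk_super->dev_item));

	/* release in the reverse order of acquisition */
	brelse(bh);
	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
	return 0;
}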
167
168 static void requeue_list(struct btrfs_pending_bios *pending_bios,
169 struct bio *head, struct bio *tail)
170 {
171
172 struct bio *old_head;
173
174 old_head = pending_bios->head;
175 pending_bios->head = head;
176 if (pending_bios->tail)
177 tail->bi_next = old_head;
178 else
179 pending_bios->tail = tail;
180 }
181
182 /*
183 * we try to collect pending bios for a device so we don't get a large
184 * number of procs sending bios down to the same device. This greatly
   185  * improves the scheduler's ability to collect and merge the bios.
186 *
187 * But, it also turns into a long list of bios to process and that is sure
188 * to eventually make the worker thread block. The solution here is to
189 * make some progress and then put this work struct back at the end of
190 * the list if the block device is congested. This way, multiple devices
191 * can make progress from a single worker thread.
192 */
193 static noinline void run_scheduled_bios(struct btrfs_device *device)
194 {
195 struct bio *pending;
196 struct backing_dev_info *bdi;
197 struct btrfs_fs_info *fs_info;
198 struct btrfs_pending_bios *pending_bios;
199 struct bio *tail;
200 struct bio *cur;
201 int again = 0;
202 unsigned long num_run;
203 unsigned long batch_run = 0;
204 unsigned long limit;
205 unsigned long last_waited = 0;
206 int force_reg = 0;
207 int sync_pending = 0;
208 struct blk_plug plug;
209
210 /*
211 * this function runs all the bios we've collected for
212 * a particular device. We don't want to wander off to
213 * another device without first sending all of these down.
   214  * So, set up a plug here and finish it off before we return
215 */
216 blk_start_plug(&plug);
217
218 bdi = blk_get_backing_dev_info(device->bdev);
219 fs_info = device->dev_root->fs_info;
220 limit = btrfs_async_submit_limit(fs_info);
221 limit = limit * 2 / 3;
222
223 loop:
224 spin_lock(&device->io_lock);
225
226 loop_lock:
227 num_run = 0;
228
229 /* take all the bios off the list at once and process them
230 * later on (without the lock held). But, remember the
231 * tail and other pointers so the bios can be properly reinserted
232 * into the list if we hit congestion
233 */
234 if (!force_reg && device->pending_sync_bios.head) {
235 pending_bios = &device->pending_sync_bios;
236 force_reg = 1;
237 } else {
238 pending_bios = &device->pending_bios;
239 force_reg = 0;
240 }
241
242 pending = pending_bios->head;
243 tail = pending_bios->tail;
244 WARN_ON(pending && !tail);
245
246 /*
247 * if pending was null this time around, no bios need processing
248 * at all and we can stop. Otherwise it'll loop back up again
249 * and do an additional check so no bios are missed.
250 *
251 * device->running_pending is used to synchronize with the
252 * schedule_bio code.
253 */
254 if (device->pending_sync_bios.head == NULL &&
255 device->pending_bios.head == NULL) {
256 again = 0;
257 device->running_pending = 0;
258 } else {
259 again = 1;
260 device->running_pending = 1;
261 }
262
263 pending_bios->head = NULL;
264 pending_bios->tail = NULL;
265
266 spin_unlock(&device->io_lock);
267
268 while (pending) {
269
270 rmb();
271 /* we want to work on both lists, but do more bios on the
272 * sync list than the regular list
273 */
274 if ((num_run > 32 &&
275 pending_bios != &device->pending_sync_bios &&
276 device->pending_sync_bios.head) ||
277 (num_run > 64 && pending_bios == &device->pending_sync_bios &&
278 device->pending_bios.head)) {
279 spin_lock(&device->io_lock);
280 requeue_list(pending_bios, pending, tail);
281 goto loop_lock;
282 }
283
284 cur = pending;
285 pending = pending->bi_next;
286 cur->bi_next = NULL;
287
288 if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
289 waitqueue_active(&fs_info->async_submit_wait))
290 wake_up(&fs_info->async_submit_wait);
291
292 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
293
294 /*
295 * if we're doing the sync list, record that our
296 * plug has some sync requests on it
297 *
298 * If we're doing the regular list and there are
299 * sync requests sitting around, unplug before
300 * we add more
301 */
302 if (pending_bios == &device->pending_sync_bios) {
303 sync_pending = 1;
304 } else if (sync_pending) {
305 blk_finish_plug(&plug);
306 blk_start_plug(&plug);
307 sync_pending = 0;
308 }
309
310 btrfsic_submit_bio(cur->bi_rw, cur);
311 num_run++;
312 batch_run++;
313 if (need_resched())
314 cond_resched();
315
316 /*
317 * we made progress, there is more work to do and the bdi
318 * is now congested. Back off and let other work structs
319 * run instead
320 */
321 if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
322 fs_info->fs_devices->open_devices > 1) {
323 struct io_context *ioc;
324
325 ioc = current->io_context;
326
327 /*
328 * the main goal here is that we don't want to
329 * block if we're going to be able to submit
330 * more requests without blocking.
331 *
332 * This code does two great things, it pokes into
333 * the elevator code from a filesystem _and_
334 * it makes assumptions about how batching works.
335 */
336 if (ioc && ioc->nr_batch_requests > 0 &&
337 time_before(jiffies, ioc->last_waited + HZ/50UL) &&
338 (last_waited == 0 ||
339 ioc->last_waited == last_waited)) {
340 /*
341 * we want to go through our batch of
342 * requests and stop. So, we copy out
343 * the ioc->last_waited time and test
344 * against it before looping
345 */
346 last_waited = ioc->last_waited;
347 if (need_resched())
348 cond_resched();
349 continue;
350 }
351 spin_lock(&device->io_lock);
352 requeue_list(pending_bios, pending, tail);
353 device->running_pending = 1;
354
355 spin_unlock(&device->io_lock);
356 btrfs_requeue_work(&device->work);
357 goto done;
358 }
359 /* unplug every 64 requests just for good measure */
360 if (batch_run % 64 == 0) {
361 blk_finish_plug(&plug);
362 blk_start_plug(&plug);
363 sync_pending = 0;
364 }
365 }
366
367 cond_resched();
368 if (again)
369 goto loop;
370
371 spin_lock(&device->io_lock);
372 if (device->pending_bios.head || device->pending_sync_bios.head)
373 goto loop_lock;
374 spin_unlock(&device->io_lock);
375
376 done:
377 blk_finish_plug(&plug);
378 }
379
380 static void pending_bios_fn(struct btrfs_work *work)
381 {
382 struct btrfs_device *device;
383
384 device = container_of(work, struct btrfs_device, work);
385 run_scheduled_bios(device);
386 }
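/*
 * Illustration (not part of the original file): the producer side that
 * pairs with run_scheduled_bios() above. The real submission path lives
 * later in volumes.c; this sketch only demonstrates the head/tail list
 * protocol and the running_pending handshake taken under io_lock.
 */
static void example_queue_bio(struct btrfs_device *device, struct bio *bio,
			      int sync)
{
	struct btrfs_pending_bios *pending_bios;
	int should_queue;

	bio->bi_next = NULL;

	spin_lock(&device->io_lock);
	pending_bios = sync ? &device->pending_sync_bios :
			      &device->pending_bios;
	/* append to the tail; run_scheduled_bios() consumes from the head */
	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;
	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	/* only kick the worker if it is not already running */
	should_queue = !device->running_pending;
	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&device->dev_root->fs_info->submit_workers,
				   &device->work);
}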
387
388 static noinline int device_list_add(const char *path,
389 struct btrfs_super_block *disk_super,
390 u64 devid, struct btrfs_fs_devices **fs_devices_ret)
391 {
392 struct btrfs_device *device;
393 struct btrfs_fs_devices *fs_devices;
394 struct rcu_string *name;
395 u64 found_transid = btrfs_super_generation(disk_super);
396
397 fs_devices = find_fsid(disk_super->fsid);
398 if (!fs_devices) {
399 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
400 if (!fs_devices)
401 return -ENOMEM;
402 INIT_LIST_HEAD(&fs_devices->devices);
403 INIT_LIST_HEAD(&fs_devices->alloc_list);
404 list_add(&fs_devices->list, &fs_uuids);
405 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
406 fs_devices->latest_devid = devid;
407 fs_devices->latest_trans = found_transid;
408 mutex_init(&fs_devices->device_list_mutex);
409 device = NULL;
410 } else {
411 device = __find_device(&fs_devices->devices, devid,
412 disk_super->dev_item.uuid);
413 }
414 if (!device) {
415 if (fs_devices->opened)
416 return -EBUSY;
417
418 device = kzalloc(sizeof(*device), GFP_NOFS);
419 if (!device) {
420 /* we can safely leave the fs_devices entry around */
421 return -ENOMEM;
422 }
423 device->devid = devid;
424 device->dev_stats_valid = 0;
425 device->work.func = pending_bios_fn;
426 memcpy(device->uuid, disk_super->dev_item.uuid,
427 BTRFS_UUID_SIZE);
428 spin_lock_init(&device->io_lock);
429
430 name = rcu_string_strdup(path, GFP_NOFS);
431 if (!name) {
432 kfree(device);
433 return -ENOMEM;
434 }
435 rcu_assign_pointer(device->name, name);
436 INIT_LIST_HEAD(&device->dev_alloc_list);
437
438 /* init readahead state */
439 spin_lock_init(&device->reada_lock);
440 device->reada_curr_zone = NULL;
441 atomic_set(&device->reada_in_flight, 0);
442 device->reada_next = 0;
443 INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
444 INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);
445
446 mutex_lock(&fs_devices->device_list_mutex);
447 list_add_rcu(&device->dev_list, &fs_devices->devices);
448 mutex_unlock(&fs_devices->device_list_mutex);
449
450 device->fs_devices = fs_devices;
451 fs_devices->num_devices++;
452 } else if (!device->name || strcmp(device->name->str, path)) {
453 name = rcu_string_strdup(path, GFP_NOFS);
454 if (!name)
455 return -ENOMEM;
456 rcu_string_free(device->name);
457 rcu_assign_pointer(device->name, name);
458 if (device->missing) {
459 fs_devices->missing_devices--;
460 device->missing = 0;
461 }
462 }
463
464 if (found_transid > fs_devices->latest_trans) {
465 fs_devices->latest_devid = devid;
466 fs_devices->latest_trans = found_transid;
467 }
468 *fs_devices_ret = fs_devices;
469 return 0;
470 }
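/*
 * Example (illustrative): scanning /dev/sdb and /dev/sdc from the same
 * filesystem yields one btrfs_fs_devices entry on fs_uuids holding two
 * btrfs_device entries; re-scanning /dev/sdb under a different path only
 * swaps device->name via RCU and, if the device was flagged missing,
 * clears that state.
 */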
471
472 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
473 {
474 struct btrfs_fs_devices *fs_devices;
475 struct btrfs_device *device;
476 struct btrfs_device *orig_dev;
477
478 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
479 if (!fs_devices)
480 return ERR_PTR(-ENOMEM);
481
482 INIT_LIST_HEAD(&fs_devices->devices);
483 INIT_LIST_HEAD(&fs_devices->alloc_list);
484 INIT_LIST_HEAD(&fs_devices->list);
485 mutex_init(&fs_devices->device_list_mutex);
486 fs_devices->latest_devid = orig->latest_devid;
487 fs_devices->latest_trans = orig->latest_trans;
488 fs_devices->total_devices = orig->total_devices;
489 memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
490
   491 	/* We hold the volume lock, so it is safe to walk the devices. */
492 list_for_each_entry(orig_dev, &orig->devices, dev_list) {
493 struct rcu_string *name;
494
495 device = kzalloc(sizeof(*device), GFP_NOFS);
496 if (!device)
497 goto error;
498
499 /*
500 * This is ok to do without rcu read locked because we hold the
501 * uuid mutex so nothing we touch in here is going to disappear.
502 */
503 name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
504 if (!name) {
505 kfree(device);
506 goto error;
507 }
508 rcu_assign_pointer(device->name, name);
509
510 device->devid = orig_dev->devid;
511 device->work.func = pending_bios_fn;
512 memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
513 spin_lock_init(&device->io_lock);
514 INIT_LIST_HEAD(&device->dev_list);
515 INIT_LIST_HEAD(&device->dev_alloc_list);
516
517 list_add(&device->dev_list, &fs_devices->devices);
518 device->fs_devices = fs_devices;
519 fs_devices->num_devices++;
520 }
521 return fs_devices;
522 error:
523 free_fs_devices(fs_devices);
524 return ERR_PTR(-ENOMEM);
525 }
526
527 void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
528 struct btrfs_fs_devices *fs_devices, int step)
529 {
530 struct btrfs_device *device, *next;
531
532 struct block_device *latest_bdev = NULL;
533 u64 latest_devid = 0;
534 u64 latest_transid = 0;
535
536 mutex_lock(&uuid_mutex);
537 again:
   538 	/* This is the initialized path; it is safe to release the devices. */
539 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
540 if (device->in_fs_metadata) {
541 if (!device->is_tgtdev_for_dev_replace &&
542 (!latest_transid ||
543 device->generation > latest_transid)) {
544 latest_devid = device->devid;
545 latest_transid = device->generation;
546 latest_bdev = device->bdev;
547 }
548 continue;
549 }
550
551 if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
552 /*
553 * In the first step, keep the device which has
554 * the correct fsid and the devid that is used
555 * for the dev_replace procedure.
556 * In the second step, the dev_replace state is
557 * read from the device tree and it is known
558 * whether the procedure is really active or
559 * not, which means whether this device is
560 * used or whether it should be removed.
561 */
562 if (step == 0 || device->is_tgtdev_for_dev_replace) {
563 continue;
564 }
565 }
566 if (device->bdev) {
567 blkdev_put(device->bdev, device->mode);
568 device->bdev = NULL;
569 fs_devices->open_devices--;
570 }
571 if (device->writeable) {
572 list_del_init(&device->dev_alloc_list);
573 device->writeable = 0;
574 if (!device->is_tgtdev_for_dev_replace)
575 fs_devices->rw_devices--;
576 }
577 list_del_init(&device->dev_list);
578 fs_devices->num_devices--;
579 rcu_string_free(device->name);
580 kfree(device);
581 }
582
583 if (fs_devices->seed) {
584 fs_devices = fs_devices->seed;
585 goto again;
586 }
587
588 fs_devices->latest_bdev = latest_bdev;
589 fs_devices->latest_devid = latest_devid;
590 fs_devices->latest_trans = latest_transid;
591
592 mutex_unlock(&uuid_mutex);
593 }
594
595 static void __free_device(struct work_struct *work)
596 {
597 struct btrfs_device *device;
598
599 device = container_of(work, struct btrfs_device, rcu_work);
600
601 if (device->bdev)
602 blkdev_put(device->bdev, device->mode);
603
604 rcu_string_free(device->name);
605 kfree(device);
606 }
607
608 static void free_device(struct rcu_head *head)
609 {
610 struct btrfs_device *device;
611
612 device = container_of(head, struct btrfs_device, rcu);
613
614 INIT_WORK(&device->rcu_work, __free_device);
615 schedule_work(&device->rcu_work);
616 }
617
618 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
619 {
620 struct btrfs_device *device;
621
622 if (--fs_devices->opened > 0)
623 return 0;
624
625 mutex_lock(&fs_devices->device_list_mutex);
626 list_for_each_entry(device, &fs_devices->devices, dev_list) {
627 struct btrfs_device *new_device;
628 struct rcu_string *name;
629
630 if (device->bdev)
631 fs_devices->open_devices--;
632
633 if (device->writeable && !device->is_tgtdev_for_dev_replace) {
634 list_del_init(&device->dev_alloc_list);
635 fs_devices->rw_devices--;
636 }
637
638 if (device->can_discard)
639 fs_devices->num_can_discard--;
640
641 new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
642 BUG_ON(!new_device); /* -ENOMEM */
643 memcpy(new_device, device, sizeof(*new_device));
644
645 /* Safe because we are under uuid_mutex */
646 if (device->name) {
647 name = rcu_string_strdup(device->name->str, GFP_NOFS);
648 BUG_ON(device->name && !name); /* -ENOMEM */
649 rcu_assign_pointer(new_device->name, name);
650 }
651 new_device->bdev = NULL;
652 new_device->writeable = 0;
653 new_device->in_fs_metadata = 0;
654 new_device->can_discard = 0;
655 spin_lock_init(&new_device->io_lock);
656 list_replace_rcu(&device->dev_list, &new_device->dev_list);
657
658 call_rcu(&device->rcu, free_device);
659 }
660 mutex_unlock(&fs_devices->device_list_mutex);
661
662 WARN_ON(fs_devices->open_devices);
663 WARN_ON(fs_devices->rw_devices);
664 fs_devices->opened = 0;
665 fs_devices->seeding = 0;
666
667 return 0;
668 }
669
670 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
671 {
672 struct btrfs_fs_devices *seed_devices = NULL;
673 int ret;
674
675 mutex_lock(&uuid_mutex);
676 ret = __btrfs_close_devices(fs_devices);
677 if (!fs_devices->opened) {
678 seed_devices = fs_devices->seed;
679 fs_devices->seed = NULL;
680 }
681 mutex_unlock(&uuid_mutex);
682
683 while (seed_devices) {
684 fs_devices = seed_devices;
685 seed_devices = fs_devices->seed;
686 __btrfs_close_devices(fs_devices);
687 free_fs_devices(fs_devices);
688 }
689 /*
690 * Wait for rcu kworkers under __btrfs_close_devices
   691 	 * to finish all blkdev_puts so the devices are really
   692 	 * freed when umount is done.
693 */
694 rcu_barrier();
695 return ret;
696 }
697
698 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
699 fmode_t flags, void *holder)
700 {
701 struct request_queue *q;
702 struct block_device *bdev;
703 struct list_head *head = &fs_devices->devices;
704 struct btrfs_device *device;
705 struct block_device *latest_bdev = NULL;
706 struct buffer_head *bh;
707 struct btrfs_super_block *disk_super;
708 u64 latest_devid = 0;
709 u64 latest_transid = 0;
710 u64 devid;
711 int seeding = 1;
712 int ret = 0;
713
714 flags |= FMODE_EXCL;
715
716 list_for_each_entry(device, head, dev_list) {
717 if (device->bdev)
718 continue;
719 if (!device->name)
720 continue;
721
722 /* Just open everything we can; ignore failures here */
723 if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
724 &bdev, &bh))
725 continue;
726
727 disk_super = (struct btrfs_super_block *)bh->b_data;
728 devid = btrfs_stack_device_id(&disk_super->dev_item);
729 if (devid != device->devid)
730 goto error_brelse;
731
732 if (memcmp(device->uuid, disk_super->dev_item.uuid,
733 BTRFS_UUID_SIZE))
734 goto error_brelse;
735
736 device->generation = btrfs_super_generation(disk_super);
737 if (!latest_transid || device->generation > latest_transid) {
738 latest_devid = devid;
739 latest_transid = device->generation;
740 latest_bdev = bdev;
741 }
742
743 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
744 device->writeable = 0;
745 } else {
746 device->writeable = !bdev_read_only(bdev);
747 seeding = 0;
748 }
749
750 q = bdev_get_queue(bdev);
751 if (blk_queue_discard(q)) {
752 device->can_discard = 1;
753 fs_devices->num_can_discard++;
754 }
755
756 device->bdev = bdev;
757 device->in_fs_metadata = 0;
758 device->mode = flags;
759
760 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
761 fs_devices->rotating = 1;
762
763 fs_devices->open_devices++;
764 if (device->writeable && !device->is_tgtdev_for_dev_replace) {
765 fs_devices->rw_devices++;
766 list_add(&device->dev_alloc_list,
767 &fs_devices->alloc_list);
768 }
769 brelse(bh);
770 continue;
771
772 error_brelse:
773 brelse(bh);
774 blkdev_put(bdev, flags);
775 continue;
776 }
777 if (fs_devices->open_devices == 0) {
778 ret = -EINVAL;
779 goto out;
780 }
781 fs_devices->seeding = seeding;
782 fs_devices->opened = 1;
783 fs_devices->latest_bdev = latest_bdev;
784 fs_devices->latest_devid = latest_devid;
785 fs_devices->latest_trans = latest_transid;
786 fs_devices->total_rw_bytes = 0;
787 out:
788 return ret;
789 }
790
791 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
792 fmode_t flags, void *holder)
793 {
794 int ret;
795
796 mutex_lock(&uuid_mutex);
797 if (fs_devices->opened) {
798 fs_devices->opened++;
799 ret = 0;
800 } else {
801 ret = __btrfs_open_devices(fs_devices, flags, holder);
802 }
803 mutex_unlock(&uuid_mutex);
804 return ret;
805 }
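/*
 * Illustration (hypothetical caller): every btrfs_open_devices() must be
 * balanced by a btrfs_close_devices(). Only the first open really opens
 * the block devices; later calls just bump fs_devices->opened, and the
 * last close tears everything down via __btrfs_close_devices().
 */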
806
807 /*
   808  * Look for a btrfs signature on a device. This may be called outside of the
   809  * mount path, and we are not allowed to call set_blocksize during the scan.
   810  * The superblock is read via the pagecache.
811 */
812 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
813 struct btrfs_fs_devices **fs_devices_ret)
814 {
815 struct btrfs_super_block *disk_super;
816 struct block_device *bdev;
817 struct page *page;
818 void *p;
819 int ret = -EINVAL;
820 u64 devid;
821 u64 transid;
822 u64 total_devices;
823 u64 bytenr;
824 pgoff_t index;
825
826 /*
827 * we would like to check all the supers, but that would make
828 * a btrfs mount succeed after a mkfs from a different FS.
829 * So, we need to add a special mount option to scan for
830 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
831 */
832 bytenr = btrfs_sb_offset(0);
833 flags |= FMODE_EXCL;
834 mutex_lock(&uuid_mutex);
835
836 bdev = blkdev_get_by_path(path, flags, holder);
837
838 if (IS_ERR(bdev)) {
839 ret = PTR_ERR(bdev);
840 goto error;
841 }
842
843 /* make sure our super fits in the device */
844 if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
845 goto error_bdev_put;
846
847 /* make sure our super fits in the page */
848 if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
849 goto error_bdev_put;
850
851 /* make sure our super doesn't straddle pages on disk */
852 index = bytenr >> PAGE_CACHE_SHIFT;
853 if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
854 goto error_bdev_put;
855
856 /* pull in the page with our super */
857 page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
858 index, GFP_NOFS);
859
860 if (IS_ERR_OR_NULL(page))
861 goto error_bdev_put;
862
863 p = kmap(page);
864
865 /* align our pointer to the offset of the super block */
866 disk_super = p + (bytenr & ~PAGE_CACHE_MASK);
867
868 if (btrfs_super_bytenr(disk_super) != bytenr ||
869 btrfs_super_magic(disk_super) != BTRFS_MAGIC)
870 goto error_unmap;
871
872 devid = btrfs_stack_device_id(&disk_super->dev_item);
873 transid = btrfs_super_generation(disk_super);
874 total_devices = btrfs_super_num_devices(disk_super);
875
876 if (disk_super->label[0]) {
877 if (disk_super->label[BTRFS_LABEL_SIZE - 1])
878 disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
879 printk(KERN_INFO "device label %s ", disk_super->label);
880 } else {
881 printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
882 }
883
884 printk(KERN_CONT "devid %llu transid %llu %s\n",
885 (unsigned long long)devid, (unsigned long long)transid, path);
886
887 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
888 if (!ret && fs_devices_ret)
889 (*fs_devices_ret)->total_devices = total_devices;
890
891 error_unmap:
892 kunmap(page);
893 page_cache_release(page);
894
895 error_bdev_put:
896 blkdev_put(bdev, flags);
897 error:
898 mutex_unlock(&uuid_mutex);
899 return ret;
900 }
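/*
 * Illustration (not part of the original file): a minimal caller of
 * btrfs_scan_one_device(), as the mount path or the scan ioctl would use
 * it. "example_scan" and "example_holder" are invented for this sketch.
 */
static int example_scan(const char *path, void *example_holder)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	ret = btrfs_scan_one_device(path, FMODE_READ, example_holder,
				    &fs_devices);
	if (ret)
		return ret;

	pr_info("btrfs: fsid %pU now has %llu known device(s)\n",
		fs_devices->fsid,
		(unsigned long long)fs_devices->num_devices);
	return 0;
}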
901
902 /* helper to account the used device space in the range */
903 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
904 u64 end, u64 *length)
905 {
906 struct btrfs_key key;
907 struct btrfs_root *root = device->dev_root;
908 struct btrfs_dev_extent *dev_extent;
909 struct btrfs_path *path;
910 u64 extent_end;
911 int ret;
912 int slot;
913 struct extent_buffer *l;
914
915 *length = 0;
916
917 if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
918 return 0;
919
920 path = btrfs_alloc_path();
921 if (!path)
922 return -ENOMEM;
923 path->reada = 2;
924
925 key.objectid = device->devid;
926 key.offset = start;
927 key.type = BTRFS_DEV_EXTENT_KEY;
928
929 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
930 if (ret < 0)
931 goto out;
932 if (ret > 0) {
933 ret = btrfs_previous_item(root, path, key.objectid, key.type);
934 if (ret < 0)
935 goto out;
936 }
937
938 while (1) {
939 l = path->nodes[0];
940 slot = path->slots[0];
941 if (slot >= btrfs_header_nritems(l)) {
942 ret = btrfs_next_leaf(root, path);
943 if (ret == 0)
944 continue;
945 if (ret < 0)
946 goto out;
947
948 break;
949 }
950 btrfs_item_key_to_cpu(l, &key, slot);
951
952 if (key.objectid < device->devid)
953 goto next;
954
955 if (key.objectid > device->devid)
956 break;
957
958 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
959 goto next;
960
961 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
962 extent_end = key.offset + btrfs_dev_extent_length(l,
963 dev_extent);
964 if (key.offset <= start && extent_end > end) {
965 *length = end - start + 1;
966 break;
967 } else if (key.offset <= start && extent_end > start)
968 *length += extent_end - start;
969 else if (key.offset > start && extent_end <= end)
970 *length += extent_end - key.offset;
971 else if (key.offset > start && key.offset <= end) {
972 *length += end - key.offset + 1;
973 break;
974 } else if (key.offset > end)
975 break;
976
977 next:
978 path->slots[0]++;
979 }
980 ret = 0;
981 out:
982 btrfs_free_path(path);
983 return ret;
984 }
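/*
 * Worked example (illustrative only): accounting the range [100, 199]
 * against dev extents [80, 150) and [180, 260). The first extent hits the
 * "key.offset <= start && extent_end > start" case and adds 150 - 100 =
 * 50; the second hits "key.offset > start && key.offset <= end", adds
 * 199 - 180 + 1 = 20 and breaks, so *length ends up as 70.
 */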
985
986 static int contains_pending_extent(struct btrfs_trans_handle *trans,
987 struct btrfs_device *device,
988 u64 *start, u64 len)
989 {
990 struct extent_map *em;
991 int ret = 0;
992
993 list_for_each_entry(em, &trans->transaction->pending_chunks, list) {
994 struct map_lookup *map;
995 int i;
996
997 map = (struct map_lookup *)em->bdev;
998 for (i = 0; i < map->num_stripes; i++) {
999 if (map->stripes[i].dev != device)
1000 continue;
1001 if (map->stripes[i].physical >= *start + len ||
1002 map->stripes[i].physical + em->orig_block_len <=
1003 *start)
1004 continue;
1005 *start = map->stripes[i].physical +
1006 em->orig_block_len;
1007 ret = 1;
1008 }
1009 }
1010
1011 return ret;
1012 }
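/*
 * Example (illustrative): if a pending chunk owns the stripe [4M, 8M) on
 * this device and the caller proposes *start = 6M with len = 1M, the
 * ranges overlap, so *start is bumped to 8M and 1 is returned, telling
 * the caller to retry its search from there.
 */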
1013
1014
1015 /*
1016 * find_free_dev_extent - find free space in the specified device
  1017  * @device: the device in which we search for free space
  1018  * @num_bytes: the size of the free space that we need
  1019  * @start: store the start of the free space.
  1020  * @len: the size of the free space that we find, or the size of the max
  1021  * free space if we don't find suitable free space
  1022  *
  1023  * this uses a pretty simple search, the expectation is that it is
  1024  * called very infrequently and that a given device has a small number
  1025  * of extents
  1026  *
  1027  * @start is used to store the start of the free space if we find it. But if
  1028  * we don't find suitable free space, it will be used to store the start
  1029  * position of the max free space.
  1030  *
  1031  * @len is used to store the size of the free space that we find.
  1032  * But if we don't find suitable free space, it is used to store the size of
  1033  * the max free space.
1034 */
1035 int find_free_dev_extent(struct btrfs_trans_handle *trans,
1036 struct btrfs_device *device, u64 num_bytes,
1037 u64 *start, u64 *len)
1038 {
1039 struct btrfs_key key;
1040 struct btrfs_root *root = device->dev_root;
1041 struct btrfs_dev_extent *dev_extent;
1042 struct btrfs_path *path;
1043 u64 hole_size;
1044 u64 max_hole_start;
1045 u64 max_hole_size;
1046 u64 extent_end;
1047 u64 search_start;
1048 u64 search_end = device->total_bytes;
1049 int ret;
1050 int slot;
1051 struct extent_buffer *l;
1052
1053 /* FIXME use last free of some kind */
1054
1055 /* we don't want to overwrite the superblock on the drive,
1056 * so we make sure to start at an offset of at least 1MB
1057 */
1058 search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
1059
1060 path = btrfs_alloc_path();
1061 if (!path)
1062 return -ENOMEM;
1063 again:
1064 max_hole_start = search_start;
1065 max_hole_size = 0;
1066 hole_size = 0;
1067
1068 if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
1069 ret = -ENOSPC;
1070 goto out;
1071 }
1072
1073 path->reada = 2;
1074 path->search_commit_root = 1;
1075 path->skip_locking = 1;
1076
1077 key.objectid = device->devid;
1078 key.offset = search_start;
1079 key.type = BTRFS_DEV_EXTENT_KEY;
1080
1081 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1082 if (ret < 0)
1083 goto out;
1084 if (ret > 0) {
1085 ret = btrfs_previous_item(root, path, key.objectid, key.type);
1086 if (ret < 0)
1087 goto out;
1088 }
1089
1090 while (1) {
1091 l = path->nodes[0];
1092 slot = path->slots[0];
1093 if (slot >= btrfs_header_nritems(l)) {
1094 ret = btrfs_next_leaf(root, path);
1095 if (ret == 0)
1096 continue;
1097 if (ret < 0)
1098 goto out;
1099
1100 break;
1101 }
1102 btrfs_item_key_to_cpu(l, &key, slot);
1103
1104 if (key.objectid < device->devid)
1105 goto next;
1106
1107 if (key.objectid > device->devid)
1108 break;
1109
1110 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
1111 goto next;
1112
1113 if (key.offset > search_start) {
1114 hole_size = key.offset - search_start;
1115
1116 /*
1117 * Have to check before we set max_hole_start, otherwise
1118 * we could end up sending back this offset anyway.
1119 */
1120 if (contains_pending_extent(trans, device,
1121 &search_start,
1122 hole_size))
1123 hole_size = 0;
1124
1125 if (hole_size > max_hole_size) {
1126 max_hole_start = search_start;
1127 max_hole_size = hole_size;
1128 }
1129
1130 /*
  1131 			 * If this free space is greater than what we need,
1132 * it must be the max free space that we have found
1133 * until now, so max_hole_start must point to the start
1134 * of this free space and the length of this free space
1135 * is stored in max_hole_size. Thus, we return
1136 * max_hole_start and max_hole_size and go back to the
1137 * caller.
1138 */
1139 if (hole_size >= num_bytes) {
1140 ret = 0;
1141 goto out;
1142 }
1143 }
1144
1145 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1146 extent_end = key.offset + btrfs_dev_extent_length(l,
1147 dev_extent);
1148 if (extent_end > search_start)
1149 search_start = extent_end;
1150 next:
1151 path->slots[0]++;
1152 cond_resched();
1153 }
1154
1155 /*
1156 * At this point, search_start should be the end of
1157 * allocated dev extents, and when shrinking the device,
1158 * search_end may be smaller than search_start.
1159 */
1160 if (search_end > search_start)
1161 hole_size = search_end - search_start;
1162
1163 if (hole_size > max_hole_size) {
1164 max_hole_start = search_start;
1165 max_hole_size = hole_size;
1166 }
1167
1168 if (contains_pending_extent(trans, device, &search_start, hole_size)) {
1169 btrfs_release_path(path);
1170 goto again;
1171 }
1172
1173 /* See above. */
1174 if (hole_size < num_bytes)
1175 ret = -ENOSPC;
1176 else
1177 ret = 0;
1178
1179 out:
1180 btrfs_free_path(path);
1181 *start = max_hole_start;
1182 if (len)
1183 *len = max_hole_size;
1184 return ret;
1185 }
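/*
 * Illustration (not part of the original file): a hypothetical caller
 * reserving room for a new chunk, in the way the allocator uses this
 * helper; "example_reserve" is invented. On -ENOSPC, start and len still
 * describe the largest hole that was found.
 */
static int example_reserve(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device, u64 num_bytes)
{
	u64 start;
	u64 len;
	int ret;

	ret = find_free_dev_extent(trans, device, num_bytes, &start, &len);
	if (ret == -ENOSPC) {
		pr_info("btrfs: want %llu, largest hole is %llu at %llu\n",
			(unsigned long long)num_bytes,
			(unsigned long long)len,
			(unsigned long long)start);
		return ret;
	}
	if (ret)
		return ret;

	/* [start, start + num_bytes) is currently free of dev extents */
	return 0;
}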
1186
1187 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1188 struct btrfs_device *device,
1189 u64 start)
1190 {
1191 int ret;
1192 struct btrfs_path *path;
1193 struct btrfs_root *root = device->dev_root;
1194 struct btrfs_key key;
1195 struct btrfs_key found_key;
1196 struct extent_buffer *leaf = NULL;
1197 struct btrfs_dev_extent *extent = NULL;
1198
1199 path = btrfs_alloc_path();
1200 if (!path)
1201 return -ENOMEM;
1202
1203 key.objectid = device->devid;
1204 key.offset = start;
1205 key.type = BTRFS_DEV_EXTENT_KEY;
1206 again:
1207 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1208 if (ret > 0) {
1209 ret = btrfs_previous_item(root, path, key.objectid,
1210 BTRFS_DEV_EXTENT_KEY);
1211 if (ret)
1212 goto out;
1213 leaf = path->nodes[0];
1214 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1215 extent = btrfs_item_ptr(leaf, path->slots[0],
1216 struct btrfs_dev_extent);
1217 BUG_ON(found_key.offset > start || found_key.offset +
1218 btrfs_dev_extent_length(leaf, extent) < start);
1219 key = found_key;
1220 btrfs_release_path(path);
1221 goto again;
1222 } else if (ret == 0) {
1223 leaf = path->nodes[0];
1224 extent = btrfs_item_ptr(leaf, path->slots[0],
1225 struct btrfs_dev_extent);
1226 } else {
1227 btrfs_error(root->fs_info, ret, "Slot search failed");
1228 goto out;
1229 }
1230
1231 if (device->bytes_used > 0) {
1232 u64 len = btrfs_dev_extent_length(leaf, extent);
1233 device->bytes_used -= len;
1234 spin_lock(&root->fs_info->free_chunk_lock);
1235 root->fs_info->free_chunk_space += len;
1236 spin_unlock(&root->fs_info->free_chunk_lock);
1237 }
1238 ret = btrfs_del_item(trans, root, path);
1239 if (ret) {
1240 btrfs_error(root->fs_info, ret,
1241 "Failed to remove dev extent item");
1242 }
1243 out:
1244 btrfs_free_path(path);
1245 return ret;
1246 }
1247
1248 static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1249 struct btrfs_device *device,
1250 u64 chunk_tree, u64 chunk_objectid,
1251 u64 chunk_offset, u64 start, u64 num_bytes)
1252 {
1253 int ret;
1254 struct btrfs_path *path;
1255 struct btrfs_root *root = device->dev_root;
1256 struct btrfs_dev_extent *extent;
1257 struct extent_buffer *leaf;
1258 struct btrfs_key key;
1259
1260 WARN_ON(!device->in_fs_metadata);
1261 WARN_ON(device->is_tgtdev_for_dev_replace);
1262 path = btrfs_alloc_path();
1263 if (!path)
1264 return -ENOMEM;
1265
1266 key.objectid = device->devid;
1267 key.offset = start;
1268 key.type = BTRFS_DEV_EXTENT_KEY;
1269 ret = btrfs_insert_empty_item(trans, root, path, &key,
1270 sizeof(*extent));
1271 if (ret)
1272 goto out;
1273
1274 leaf = path->nodes[0];
1275 extent = btrfs_item_ptr(leaf, path->slots[0],
1276 struct btrfs_dev_extent);
1277 btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1278 btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1279 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1280
1281 write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1282 (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
1283 BTRFS_UUID_SIZE);
1284
1285 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1286 btrfs_mark_buffer_dirty(leaf);
1287 out:
1288 btrfs_free_path(path);
1289 return ret;
1290 }
1291
1292 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1293 {
1294 struct extent_map_tree *em_tree;
1295 struct extent_map *em;
1296 struct rb_node *n;
1297 u64 ret = 0;
1298
1299 em_tree = &fs_info->mapping_tree.map_tree;
1300 read_lock(&em_tree->lock);
1301 n = rb_last(&em_tree->map);
1302 if (n) {
1303 em = rb_entry(n, struct extent_map, rb_node);
1304 ret = em->start + em->len;
1305 }
1306 read_unlock(&em_tree->lock);
1307
1308 return ret;
1309 }
1310
1311 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1312 u64 *devid_ret)
1313 {
1314 int ret;
1315 struct btrfs_key key;
1316 struct btrfs_key found_key;
1317 struct btrfs_path *path;
1318
1319 path = btrfs_alloc_path();
1320 if (!path)
1321 return -ENOMEM;
1322
1323 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1324 key.type = BTRFS_DEV_ITEM_KEY;
1325 key.offset = (u64)-1;
1326
1327 ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1328 if (ret < 0)
1329 goto error;
1330
1331 BUG_ON(ret == 0); /* Corruption */
1332
1333 ret = btrfs_previous_item(fs_info->chunk_root, path,
1334 BTRFS_DEV_ITEMS_OBJECTID,
1335 BTRFS_DEV_ITEM_KEY);
1336 if (ret) {
1337 *devid_ret = 1;
1338 } else {
1339 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1340 path->slots[0]);
1341 *devid_ret = found_key.offset + 1;
1342 }
1343 ret = 0;
1344 error:
1345 btrfs_free_path(path);
1346 return ret;
1347 }
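/*
 * Example (illustrative): with device items at offsets (devids) 1, 2 and
 * 5 in the chunk tree, the search for (BTRFS_DEV_ITEMS_OBJECTID,
 * BTRFS_DEV_ITEM_KEY, -1) lands past the last item, btrfs_previous_item()
 * steps back to the item with offset 5, and *devid_ret becomes 6.
 */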
1348
1349 /*
  1350  * the device information is stored in the chunk root;
1351 * the btrfs_device struct should be fully filled in
1352 */
1353 static int btrfs_add_device(struct btrfs_trans_handle *trans,
1354 struct btrfs_root *root,
1355 struct btrfs_device *device)
1356 {
1357 int ret;
1358 struct btrfs_path *path;
1359 struct btrfs_dev_item *dev_item;
1360 struct extent_buffer *leaf;
1361 struct btrfs_key key;
1362 unsigned long ptr;
1363
1364 root = root->fs_info->chunk_root;
1365
1366 path = btrfs_alloc_path();
1367 if (!path)
1368 return -ENOMEM;
1369
1370 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1371 key.type = BTRFS_DEV_ITEM_KEY;
1372 key.offset = device->devid;
1373
1374 ret = btrfs_insert_empty_item(trans, root, path, &key,
1375 sizeof(*dev_item));
1376 if (ret)
1377 goto out;
1378
1379 leaf = path->nodes[0];
1380 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1381
1382 btrfs_set_device_id(leaf, dev_item, device->devid);
1383 btrfs_set_device_generation(leaf, dev_item, 0);
1384 btrfs_set_device_type(leaf, dev_item, device->type);
1385 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1386 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1387 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1388 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1389 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1390 btrfs_set_device_group(leaf, dev_item, 0);
1391 btrfs_set_device_seek_speed(leaf, dev_item, 0);
1392 btrfs_set_device_bandwidth(leaf, dev_item, 0);
1393 btrfs_set_device_start_offset(leaf, dev_item, 0);
1394
1395 ptr = (unsigned long)btrfs_device_uuid(dev_item);
1396 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1397 ptr = (unsigned long)btrfs_device_fsid(dev_item);
1398 write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1399 btrfs_mark_buffer_dirty(leaf);
1400
1401 ret = 0;
1402 out:
1403 btrfs_free_path(path);
1404 return ret;
1405 }
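/*
 * Example (illustrative): after adding devids 1 and 2, the chunk tree
 * holds two items keyed (BTRFS_DEV_ITEMS_OBJECTID, BTRFS_DEV_ITEM_KEY,
 * devid), each a struct btrfs_dev_item carrying the sizes plus the device
 * uuid and fsid written above.
 */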
1406
1407 static int btrfs_rm_dev_item(struct btrfs_root *root,
1408 struct btrfs_device *device)
1409 {
1410 int ret;
1411 struct btrfs_path *path;
1412 struct btrfs_key key;
1413 struct btrfs_trans_handle *trans;
1414
1415 root = root->fs_info->chunk_root;
1416
1417 path = btrfs_alloc_path();
1418 if (!path)
1419 return -ENOMEM;
1420
1421 trans = btrfs_start_transaction(root, 0);
1422 if (IS_ERR(trans)) {
1423 btrfs_free_path(path);
1424 return PTR_ERR(trans);
1425 }
1426 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1427 key.type = BTRFS_DEV_ITEM_KEY;
1428 key.offset = device->devid;
1429 lock_chunks(root);
1430
1431 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1432 if (ret < 0)
1433 goto out;
1434
1435 if (ret > 0) {
1436 ret = -ENOENT;
1437 goto out;
1438 }
1439
1440 ret = btrfs_del_item(trans, root, path);
1441 if (ret)
1442 goto out;
1443 out:
1444 btrfs_free_path(path);
1445 unlock_chunks(root);
1446 btrfs_commit_transaction(trans, root);
1447 return ret;
1448 }
1449
1450 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1451 {
1452 struct btrfs_device *device;
1453 struct btrfs_device *next_device;
1454 struct block_device *bdev;
1455 struct buffer_head *bh = NULL;
1456 struct btrfs_super_block *disk_super;
1457 struct btrfs_fs_devices *cur_devices;
1458 u64 all_avail;
1459 u64 devid;
1460 u64 num_devices;
1461 u8 *dev_uuid;
1462 unsigned seq;
1463 int ret = 0;
1464 bool clear_super = false;
1465
1466 mutex_lock(&uuid_mutex);
1467
1468 do {
1469 seq = read_seqbegin(&root->fs_info->profiles_lock);
1470
1471 all_avail = root->fs_info->avail_data_alloc_bits |
1472 root->fs_info->avail_system_alloc_bits |
1473 root->fs_info->avail_metadata_alloc_bits;
1474 } while (read_seqretry(&root->fs_info->profiles_lock, seq));
1475
1476 num_devices = root->fs_info->fs_devices->num_devices;
1477 btrfs_dev_replace_lock(&root->fs_info->dev_replace);
1478 if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
1479 WARN_ON(num_devices < 1);
1480 num_devices--;
1481 }
1482 btrfs_dev_replace_unlock(&root->fs_info->dev_replace);
1483
1484 if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
1485 ret = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET;
1486 goto out;
1487 }
1488
1489 if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
1490 ret = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET;
1491 goto out;
1492 }
1493
1494 if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
1495 root->fs_info->fs_devices->rw_devices <= 2) {
1496 ret = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET;
1497 goto out;
1498 }
1499 if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
1500 root->fs_info->fs_devices->rw_devices <= 3) {
1501 ret = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET;
1502 goto out;
1503 }
1504
1505 if (strcmp(device_path, "missing") == 0) {
1506 struct list_head *devices;
1507 struct btrfs_device *tmp;
1508
1509 device = NULL;
1510 devices = &root->fs_info->fs_devices->devices;
1511 /*
1512 * It is safe to read the devices since the volume_mutex
1513 * is held.
1514 */
1515 list_for_each_entry(tmp, devices, dev_list) {
1516 if (tmp->in_fs_metadata &&
1517 !tmp->is_tgtdev_for_dev_replace &&
1518 !tmp->bdev) {
1519 device = tmp;
1520 break;
1521 }
1522 }
1523 bdev = NULL;
1524 bh = NULL;
1525 disk_super = NULL;
1526 if (!device) {
1527 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
1528 goto out;
1529 }
1530 } else {
1531 ret = btrfs_get_bdev_and_sb(device_path,
1532 FMODE_WRITE | FMODE_EXCL,
1533 root->fs_info->bdev_holder, 0,
1534 &bdev, &bh);
1535 if (ret)
1536 goto out;
1537 disk_super = (struct btrfs_super_block *)bh->b_data;
1538 devid = btrfs_stack_device_id(&disk_super->dev_item);
1539 dev_uuid = disk_super->dev_item.uuid;
1540 device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1541 disk_super->fsid);
1542 if (!device) {
1543 ret = -ENOENT;
1544 goto error_brelse;
1545 }
1546 }
1547
1548 if (device->is_tgtdev_for_dev_replace) {
1549 ret = BTRFS_ERROR_DEV_TGT_REPLACE;
1550 goto error_brelse;
1551 }
1552
1553 if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1554 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
1555 goto error_brelse;
1556 }
1557
1558 if (device->writeable) {
1559 lock_chunks(root);
1560 list_del_init(&device->dev_alloc_list);
1561 unlock_chunks(root);
1562 root->fs_info->fs_devices->rw_devices--;
1563 clear_super = true;
1564 }
1565
1566 mutex_unlock(&uuid_mutex);
1567 ret = btrfs_shrink_device(device, 0);
1568 mutex_lock(&uuid_mutex);
1569 if (ret)
1570 goto error_undo;
1571
1572 /*
1573 * TODO: the superblock still includes this device in its num_devices
1574 * counter although write_all_supers() is not locked out. This
1575 * could give a filesystem state which requires a degraded mount.
1576 */
1577 ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1578 if (ret)
1579 goto error_undo;
1580
1581 spin_lock(&root->fs_info->free_chunk_lock);
  1582 	root->fs_info->free_chunk_space -= device->total_bytes -
1583 device->bytes_used;
1584 spin_unlock(&root->fs_info->free_chunk_lock);
1585
1586 device->in_fs_metadata = 0;
1587 btrfs_scrub_cancel_dev(root->fs_info, device);
1588
1589 /*
1590 * the device list mutex makes sure that we don't change
1591 * the device list while someone else is writing out all
1592 * the device supers.
1593 */
1594
1595 cur_devices = device->fs_devices;
1596 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1597 list_del_rcu(&device->dev_list);
1598
1599 device->fs_devices->num_devices--;
1600 device->fs_devices->total_devices--;
1601
1602 if (device->missing)
1603 root->fs_info->fs_devices->missing_devices--;
1604
1605 next_device = list_entry(root->fs_info->fs_devices->devices.next,
1606 struct btrfs_device, dev_list);
1607 if (device->bdev == root->fs_info->sb->s_bdev)
1608 root->fs_info->sb->s_bdev = next_device->bdev;
1609 if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1610 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1611
1612 if (device->bdev)
1613 device->fs_devices->open_devices--;
1614
1615 call_rcu(&device->rcu, free_device);
1616 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1617
1618 num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1619 btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1620
1621 if (cur_devices->open_devices == 0) {
1622 struct btrfs_fs_devices *fs_devices;
1623 fs_devices = root->fs_info->fs_devices;
1624 while (fs_devices) {
1625 if (fs_devices->seed == cur_devices)
1626 break;
1627 fs_devices = fs_devices->seed;
1628 }
1629 fs_devices->seed = cur_devices->seed;
1630 cur_devices->seed = NULL;
1631 lock_chunks(root);
1632 __btrfs_close_devices(cur_devices);
1633 unlock_chunks(root);
1634 free_fs_devices(cur_devices);
1635 }
1636
1637 root->fs_info->num_tolerated_disk_barrier_failures =
1638 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1639
1640 /*
1641 * at this point, the device is zero sized. We want to
1642 * remove it from the devices list and zero out the old super
1643 */
1644 if (clear_super && disk_super) {
1645 /* make sure this device isn't detected as part of
1646 * the FS anymore
1647 */
1648 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1649 set_buffer_dirty(bh);
1650 sync_dirty_buffer(bh);
1651 }
1652
1653 ret = 0;
1654
1655 /* Notify udev that device has changed */
1656 if (bdev)
1657 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1658
1659 error_brelse:
1660 brelse(bh);
1661 if (bdev)
1662 blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1663 out:
1664 mutex_unlock(&uuid_mutex);
1665 return ret;
1666 error_undo:
1667 if (device->writeable) {
1668 lock_chunks(root);
1669 list_add(&device->dev_alloc_list,
1670 &root->fs_info->fs_devices->alloc_list);
1671 unlock_chunks(root);
1672 root->fs_info->fs_devices->rw_devices++;
1673 }
1674 goto error_brelse;
1675 }
1676
1677 void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
1678 struct btrfs_device *srcdev)
1679 {
1680 WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1681 list_del_rcu(&srcdev->dev_list);
1682 list_del_rcu(&srcdev->dev_alloc_list);
1683 fs_info->fs_devices->num_devices--;
1684 if (srcdev->missing) {
1685 fs_info->fs_devices->missing_devices--;
1686 fs_info->fs_devices->rw_devices++;
1687 }
1688 if (srcdev->can_discard)
1689 fs_info->fs_devices->num_can_discard--;
1690 if (srcdev->bdev)
1691 fs_info->fs_devices->open_devices--;
1692
1693 call_rcu(&srcdev->rcu, free_device);
1694 }
1695
1696 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
1697 struct btrfs_device *tgtdev)
1698 {
1699 struct btrfs_device *next_device;
1700
1701 WARN_ON(!tgtdev);
1702 mutex_lock(&fs_info->fs_devices->device_list_mutex);
1703 if (tgtdev->bdev) {
1704 btrfs_scratch_superblock(tgtdev);
1705 fs_info->fs_devices->open_devices--;
1706 }
1707 fs_info->fs_devices->num_devices--;
1708 if (tgtdev->can_discard)
1709 fs_info->fs_devices->num_can_discard++;
1710
1711 next_device = list_entry(fs_info->fs_devices->devices.next,
1712 struct btrfs_device, dev_list);
1713 if (tgtdev->bdev == fs_info->sb->s_bdev)
1714 fs_info->sb->s_bdev = next_device->bdev;
1715 if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
1716 fs_info->fs_devices->latest_bdev = next_device->bdev;
1717 list_del_rcu(&tgtdev->dev_list);
1718
1719 call_rcu(&tgtdev->rcu, free_device);
1720
1721 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1722 }
1723
1724 static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
1725 struct btrfs_device **device)
1726 {
1727 int ret = 0;
1728 struct btrfs_super_block *disk_super;
1729 u64 devid;
1730 u8 *dev_uuid;
1731 struct block_device *bdev;
1732 struct buffer_head *bh;
1733
1734 *device = NULL;
1735 ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
1736 root->fs_info->bdev_holder, 0, &bdev, &bh);
1737 if (ret)
1738 return ret;
1739 disk_super = (struct btrfs_super_block *)bh->b_data;
1740 devid = btrfs_stack_device_id(&disk_super->dev_item);
1741 dev_uuid = disk_super->dev_item.uuid;
1742 *device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1743 disk_super->fsid);
1744 brelse(bh);
1745 if (!*device)
1746 ret = -ENOENT;
1747 blkdev_put(bdev, FMODE_READ);
1748 return ret;
1749 }
1750
1751 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
1752 char *device_path,
1753 struct btrfs_device **device)
1754 {
1755 *device = NULL;
1756 if (strcmp(device_path, "missing") == 0) {
1757 struct list_head *devices;
1758 struct btrfs_device *tmp;
1759
1760 devices = &root->fs_info->fs_devices->devices;
1761 /*
1762 * It is safe to read the devices since the volume_mutex
1763 * is held by the caller.
1764 */
1765 list_for_each_entry(tmp, devices, dev_list) {
1766 if (tmp->in_fs_metadata && !tmp->bdev) {
1767 *device = tmp;
1768 break;
1769 }
1770 }
1771
1772 if (!*device) {
1773 pr_err("btrfs: no missing device found\n");
1774 return -ENOENT;
1775 }
1776
1777 return 0;
1778 } else {
1779 return btrfs_find_device_by_path(root, device_path, device);
1780 }
1781 }
1782
1783 /*
  1784  * does all the dirty work required for changing the file system's UUID.
1785 */
1786 static int btrfs_prepare_sprout(struct btrfs_root *root)
1787 {
1788 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1789 struct btrfs_fs_devices *old_devices;
1790 struct btrfs_fs_devices *seed_devices;
1791 struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1792 struct btrfs_device *device;
1793 u64 super_flags;
1794
1795 BUG_ON(!mutex_is_locked(&uuid_mutex));
1796 if (!fs_devices->seeding)
1797 return -EINVAL;
1798
1799 seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1800 if (!seed_devices)
1801 return -ENOMEM;
1802
1803 old_devices = clone_fs_devices(fs_devices);
1804 if (IS_ERR(old_devices)) {
1805 kfree(seed_devices);
1806 return PTR_ERR(old_devices);
1807 }
1808
1809 list_add(&old_devices->list, &fs_uuids);
1810
1811 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1812 seed_devices->opened = 1;
1813 INIT_LIST_HEAD(&seed_devices->devices);
1814 INIT_LIST_HEAD(&seed_devices->alloc_list);
1815 mutex_init(&seed_devices->device_list_mutex);
1816
1817 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1818 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1819 synchronize_rcu);
1820 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1821
1822 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1823 list_for_each_entry(device, &seed_devices->devices, dev_list) {
1824 device->fs_devices = seed_devices;
1825 }
1826
1827 fs_devices->seeding = 0;
1828 fs_devices->num_devices = 0;
1829 fs_devices->open_devices = 0;
1830 fs_devices->total_devices = 0;
1831 fs_devices->seed = seed_devices;
1832
1833 generate_random_uuid(fs_devices->fsid);
1834 memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1835 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1836 super_flags = btrfs_super_flags(disk_super) &
1837 ~BTRFS_SUPER_FLAG_SEEDING;
1838 btrfs_set_super_flags(disk_super, super_flags);
1839
1840 return 0;
1841 }
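/*
 * Illustration: the sprout sequence as btrfs_init_new_device() below
 * drives it on a seeding filesystem - btrfs_prepare_sprout() moves the
 * old devices onto a seed btrfs_fs_devices and generates a fresh fsid,
 * the new writable device is added to the in-memory lists,
 * init_first_rw_device() creates the first chunks, and
 * btrfs_finish_sprout() stamps the expected generation into the seed
 * device items.
 */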
1842
1843 /*
  1844  * store the expected generation for seed devices in device items.
1845 */
1846 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1847 struct btrfs_root *root)
1848 {
1849 struct btrfs_path *path;
1850 struct extent_buffer *leaf;
1851 struct btrfs_dev_item *dev_item;
1852 struct btrfs_device *device;
1853 struct btrfs_key key;
1854 u8 fs_uuid[BTRFS_UUID_SIZE];
1855 u8 dev_uuid[BTRFS_UUID_SIZE];
1856 u64 devid;
1857 int ret;
1858
1859 path = btrfs_alloc_path();
1860 if (!path)
1861 return -ENOMEM;
1862
1863 root = root->fs_info->chunk_root;
1864 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1865 key.offset = 0;
1866 key.type = BTRFS_DEV_ITEM_KEY;
1867
1868 while (1) {
1869 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1870 if (ret < 0)
1871 goto error;
1872
1873 leaf = path->nodes[0];
1874 next_slot:
1875 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1876 ret = btrfs_next_leaf(root, path);
1877 if (ret > 0)
1878 break;
1879 if (ret < 0)
1880 goto error;
1881 leaf = path->nodes[0];
1882 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1883 btrfs_release_path(path);
1884 continue;
1885 }
1886
1887 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1888 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1889 key.type != BTRFS_DEV_ITEM_KEY)
1890 break;
1891
1892 dev_item = btrfs_item_ptr(leaf, path->slots[0],
1893 struct btrfs_dev_item);
1894 devid = btrfs_device_id(leaf, dev_item);
1895 read_extent_buffer(leaf, dev_uuid,
1896 (unsigned long)btrfs_device_uuid(dev_item),
1897 BTRFS_UUID_SIZE);
1898 read_extent_buffer(leaf, fs_uuid,
1899 (unsigned long)btrfs_device_fsid(dev_item),
1900 BTRFS_UUID_SIZE);
1901 device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1902 fs_uuid);
1903 BUG_ON(!device); /* Logic error */
1904
1905 if (device->fs_devices->seeding) {
1906 btrfs_set_device_generation(leaf, dev_item,
1907 device->generation);
1908 btrfs_mark_buffer_dirty(leaf);
1909 }
1910
1911 path->slots[0]++;
1912 goto next_slot;
1913 }
1914 ret = 0;
1915 error:
1916 btrfs_free_path(path);
1917 return ret;
1918 }
1919
1920 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1921 {
1922 struct request_queue *q;
1923 struct btrfs_trans_handle *trans;
1924 struct btrfs_device *device;
1925 struct block_device *bdev;
1926 struct list_head *devices;
1927 struct super_block *sb = root->fs_info->sb;
1928 struct rcu_string *name;
1929 u64 total_bytes;
1930 int seeding_dev = 0;
1931 int ret = 0;
1932
1933 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1934 return -EROFS;
1935
1936 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1937 root->fs_info->bdev_holder);
1938 if (IS_ERR(bdev))
1939 return PTR_ERR(bdev);
1940
1941 if (root->fs_info->fs_devices->seeding) {
1942 seeding_dev = 1;
1943 down_write(&sb->s_umount);
1944 mutex_lock(&uuid_mutex);
1945 }
1946
1947 filemap_write_and_wait(bdev->bd_inode->i_mapping);
1948
1949 devices = &root->fs_info->fs_devices->devices;
1950
1951 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1952 list_for_each_entry(device, devices, dev_list) {
1953 if (device->bdev == bdev) {
1954 ret = -EEXIST;
1955 mutex_unlock(
1956 &root->fs_info->fs_devices->device_list_mutex);
1957 goto error;
1958 }
1959 }
1960 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1961
1962 device = kzalloc(sizeof(*device), GFP_NOFS);
1963 if (!device) {
1964 /* we can safely leave the fs_devices entry around */
1965 ret = -ENOMEM;
1966 goto error;
1967 }
1968
1969 name = rcu_string_strdup(device_path, GFP_NOFS);
1970 if (!name) {
1971 kfree(device);
1972 ret = -ENOMEM;
1973 goto error;
1974 }
1975 rcu_assign_pointer(device->name, name);
1976
1977 ret = find_next_devid(root->fs_info, &device->devid);
1978 if (ret) {
1979 rcu_string_free(device->name);
1980 kfree(device);
1981 goto error;
1982 }
1983
1984 trans = btrfs_start_transaction(root, 0);
1985 if (IS_ERR(trans)) {
1986 rcu_string_free(device->name);
1987 kfree(device);
1988 ret = PTR_ERR(trans);
1989 goto error;
1990 }
1991
1992 lock_chunks(root);
1993
1994 q = bdev_get_queue(bdev);
1995 if (blk_queue_discard(q))
1996 device->can_discard = 1;
1997 device->writeable = 1;
1998 device->work.func = pending_bios_fn;
1999 generate_random_uuid(device->uuid);
2000 spin_lock_init(&device->io_lock);
2001 device->generation = trans->transid;
2002 device->io_width = root->sectorsize;
2003 device->io_align = root->sectorsize;
2004 device->sector_size = root->sectorsize;
2005 device->total_bytes = i_size_read(bdev->bd_inode);
2006 device->disk_total_bytes = device->total_bytes;
2007 device->dev_root = root->fs_info->dev_root;
2008 device->bdev = bdev;
2009 device->in_fs_metadata = 1;
2010 device->is_tgtdev_for_dev_replace = 0;
2011 device->mode = FMODE_EXCL;
2012 set_blocksize(device->bdev, 4096);
2013
2014 if (seeding_dev) {
2015 sb->s_flags &= ~MS_RDONLY;
2016 ret = btrfs_prepare_sprout(root);
2017 BUG_ON(ret); /* -ENOMEM */
2018 }
2019
2020 device->fs_devices = root->fs_info->fs_devices;
2021
2022 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2023 list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
2024 list_add(&device->dev_alloc_list,
2025 &root->fs_info->fs_devices->alloc_list);
2026 root->fs_info->fs_devices->num_devices++;
2027 root->fs_info->fs_devices->open_devices++;
2028 root->fs_info->fs_devices->rw_devices++;
2029 root->fs_info->fs_devices->total_devices++;
2030 if (device->can_discard)
2031 root->fs_info->fs_devices->num_can_discard++;
2032 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2033
2034 spin_lock(&root->fs_info->free_chunk_lock);
2035 root->fs_info->free_chunk_space += device->total_bytes;
2036 spin_unlock(&root->fs_info->free_chunk_lock);
2037
2038 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2039 root->fs_info->fs_devices->rotating = 1;
2040
2041 total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
2042 btrfs_set_super_total_bytes(root->fs_info->super_copy,
2043 total_bytes + device->total_bytes);
2044
2045 total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
2046 btrfs_set_super_num_devices(root->fs_info->super_copy,
2047 total_bytes + 1);
2048 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2049
2050 if (seeding_dev) {
2051 ret = init_first_rw_device(trans, root, device);
2052 if (ret) {
2053 btrfs_abort_transaction(trans, root, ret);
2054 goto error_trans;
2055 }
2056 ret = btrfs_finish_sprout(trans, root);
2057 if (ret) {
2058 btrfs_abort_transaction(trans, root, ret);
2059 goto error_trans;
2060 }
2061 } else {
2062 ret = btrfs_add_device(trans, root, device);
2063 if (ret) {
2064 btrfs_abort_transaction(trans, root, ret);
2065 goto error_trans;
2066 }
2067 }
2068
2069 /*
2070 * we've got more storage, clear any full flags on the space
2071 * infos
2072 */
2073 btrfs_clear_space_info_full(root->fs_info);
2074
2075 unlock_chunks(root);
2076 root->fs_info->num_tolerated_disk_barrier_failures =
2077 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2078 ret = btrfs_commit_transaction(trans, root);
2079
2080 if (seeding_dev) {
2081 mutex_unlock(&uuid_mutex);
2082 up_write(&sb->s_umount);
2083
2084 if (ret) /* transaction commit */
2085 return ret;
2086
2087 ret = btrfs_relocate_sys_chunks(root);
2088 if (ret < 0)
2089 btrfs_error(root->fs_info, ret,
2090 "Failed to relocate sys chunks after "
2091 "device initialization. This can be fixed "
2092 "using the \"btrfs balance\" command.");
2093 trans = btrfs_attach_transaction(root);
2094 if (IS_ERR(trans)) {
2095 if (PTR_ERR(trans) == -ENOENT)
2096 return 0;
2097 return PTR_ERR(trans);
2098 }
2099 ret = btrfs_commit_transaction(trans, root);
2100 }
2101
2102 return ret;
2103
2104 error_trans:
2105 unlock_chunks(root);
2106 btrfs_end_transaction(trans, root);
2107 rcu_string_free(device->name);
2108 kfree(device);
2109 error:
2110 blkdev_put(bdev, FMODE_EXCL);
2111 if (seeding_dev) {
2112 mutex_unlock(&uuid_mutex);
2113 up_write(&sb->s_umount);
2114 }
2115 return ret;
2116 }
2117
2118 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2119 struct btrfs_device **device_out)
2120 {
2121 struct request_queue *q;
2122 struct btrfs_device *device;
2123 struct block_device *bdev;
2124 struct btrfs_fs_info *fs_info = root->fs_info;
2125 struct list_head *devices;
2126 struct rcu_string *name;
2127 int ret = 0;
2128
2129 *device_out = NULL;
2130 if (fs_info->fs_devices->seeding)
2131 return -EINVAL;
2132
2133 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2134 fs_info->bdev_holder);
2135 if (IS_ERR(bdev))
2136 return PTR_ERR(bdev);
2137
2138 filemap_write_and_wait(bdev->bd_inode->i_mapping);
2139
2140 devices = &fs_info->fs_devices->devices;
2141 list_for_each_entry(device, devices, dev_list) {
2142 if (device->bdev == bdev) {
2143 ret = -EEXIST;
2144 goto error;
2145 }
2146 }
2147
2148 device = kzalloc(sizeof(*device), GFP_NOFS);
2149 if (!device) {
2150 ret = -ENOMEM;
2151 goto error;
2152 }
2153
2154 name = rcu_string_strdup(device_path, GFP_NOFS);
2155 if (!name) {
2156 kfree(device);
2157 ret = -ENOMEM;
2158 goto error;
2159 }
2160 rcu_assign_pointer(device->name, name);
2161
2162 q = bdev_get_queue(bdev);
2163 if (blk_queue_discard(q))
2164 device->can_discard = 1;
2165 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2166 device->writeable = 1;
2167 device->work.func = pending_bios_fn;
2168 generate_random_uuid(device->uuid);
2169 device->devid = BTRFS_DEV_REPLACE_DEVID;
2170 spin_lock_init(&device->io_lock);
2171 device->generation = 0;
2172 device->io_width = root->sectorsize;
2173 device->io_align = root->sectorsize;
2174 device->sector_size = root->sectorsize;
2175 device->total_bytes = i_size_read(bdev->bd_inode);
2176 device->disk_total_bytes = device->total_bytes;
2177 device->dev_root = fs_info->dev_root;
2178 device->bdev = bdev;
2179 device->in_fs_metadata = 1;
2180 device->is_tgtdev_for_dev_replace = 1;
2181 device->mode = FMODE_EXCL;
2182 set_blocksize(device->bdev, 4096);
2183 device->fs_devices = fs_info->fs_devices;
2184 list_add(&device->dev_list, &fs_info->fs_devices->devices);
2185 fs_info->fs_devices->num_devices++;
2186 fs_info->fs_devices->open_devices++;
2187 if (device->can_discard)
2188 fs_info->fs_devices->num_can_discard++;
2189 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2190
2191 *device_out = device;
2192 return ret;
2193
2194 error:
2195 blkdev_put(bdev, FMODE_EXCL);
2196 return ret;
2197 }
2198
2199 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2200 struct btrfs_device *tgtdev)
2201 {
2202 WARN_ON(fs_info->fs_devices->rw_devices == 0);
2203 tgtdev->io_width = fs_info->dev_root->sectorsize;
2204 tgtdev->io_align = fs_info->dev_root->sectorsize;
2205 tgtdev->sector_size = fs_info->dev_root->sectorsize;
2206 tgtdev->dev_root = fs_info->dev_root;
2207 tgtdev->in_fs_metadata = 1;
2208 }
2209
2210 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2211 struct btrfs_device *device)
2212 {
2213 int ret;
2214 struct btrfs_path *path;
2215 struct btrfs_root *root;
2216 struct btrfs_dev_item *dev_item;
2217 struct extent_buffer *leaf;
2218 struct btrfs_key key;
2219
2220 root = device->dev_root->fs_info->chunk_root;
2221
2222 path = btrfs_alloc_path();
2223 if (!path)
2224 return -ENOMEM;
2225
2226 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2227 key.type = BTRFS_DEV_ITEM_KEY;
2228 key.offset = device->devid;
2229
2230 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2231 if (ret < 0)
2232 goto out;
2233
2234 if (ret > 0) {
2235 ret = -ENOENT;
2236 goto out;
2237 }
2238
2239 leaf = path->nodes[0];
2240 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2241
2242 btrfs_set_device_id(leaf, dev_item, device->devid);
2243 btrfs_set_device_type(leaf, dev_item, device->type);
2244 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2245 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2246 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2247 btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
2248 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
2249 btrfs_mark_buffer_dirty(leaf);
2250
2251 out:
2252 btrfs_free_path(path);
2253 return ret;
2254 }
2255
2256 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
2257 struct btrfs_device *device, u64 new_size)
2258 {
2259 struct btrfs_super_block *super_copy =
2260 device->dev_root->fs_info->super_copy;
2261 u64 old_total = btrfs_super_total_bytes(super_copy);
2262 u64 diff = new_size - device->total_bytes;
2263
2264 if (!device->writeable)
2265 return -EACCES;
2266 if (new_size <= device->total_bytes ||
2267 device->is_tgtdev_for_dev_replace)
2268 return -EINVAL;
2269
2270 btrfs_set_super_total_bytes(super_copy, old_total + diff);
2271 device->fs_devices->total_rw_bytes += diff;
2272
2273 device->total_bytes = new_size;
2274 device->disk_total_bytes = new_size;
2275 btrfs_clear_space_info_full(device->dev_root->fs_info);
2276
2277 return btrfs_update_device(trans, device);
2278 }
2279
2280 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2281 struct btrfs_device *device, u64 new_size)
2282 {
2283 int ret;
2284 lock_chunks(device->dev_root);
2285 ret = __btrfs_grow_device(trans, device, new_size);
2286 unlock_chunks(device->dev_root);
2287 return ret;
2288 }
2289
2290 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2291 struct btrfs_root *root,
2292 u64 chunk_tree, u64 chunk_objectid,
2293 u64 chunk_offset)
2294 {
2295 int ret;
2296 struct btrfs_path *path;
2297 struct btrfs_key key;
2298
2299 root = root->fs_info->chunk_root;
2300 path = btrfs_alloc_path();
2301 if (!path)
2302 return -ENOMEM;
2303
2304 key.objectid = chunk_objectid;
2305 key.offset = chunk_offset;
2306 key.type = BTRFS_CHUNK_ITEM_KEY;
2307
2308 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2309 if (ret < 0)
2310 goto out;
2311 else if (ret > 0) { /* Logic error or corruption */
2312 btrfs_error(root->fs_info, -ENOENT,
2313 "Failed lookup while freeing chunk.");
2314 ret = -ENOENT;
2315 goto out;
2316 }
2317
2318 ret = btrfs_del_item(trans, root, path);
2319 if (ret < 0)
2320 btrfs_error(root->fs_info, ret,
2321 "Failed to delete chunk item.");
2322 out:
2323 btrfs_free_path(path);
2324 return ret;
2325 }
2326
2327 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
2328 chunk_offset)
2329 {
2330 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2331 struct btrfs_disk_key *disk_key;
2332 struct btrfs_chunk *chunk;
2333 u8 *ptr;
2334 int ret = 0;
2335 u32 num_stripes;
2336 u32 array_size;
2337 u32 len = 0;
2338 u32 cur;
2339 struct btrfs_key key;
2340
2341 array_size = btrfs_super_sys_array_size(super_copy);
2342
2343 ptr = super_copy->sys_chunk_array;
2344 cur = 0;
2345
2346 while (cur < array_size) {
2347 disk_key = (struct btrfs_disk_key *)ptr;
2348 btrfs_disk_key_to_cpu(&key, disk_key);
2349
2350 len = sizeof(*disk_key);
2351
2352 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2353 chunk = (struct btrfs_chunk *)(ptr + len);
2354 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2355 len += btrfs_chunk_item_size(num_stripes);
2356 } else {
2357 ret = -EIO;
2358 break;
2359 }
2360 if (key.objectid == chunk_objectid &&
2361 key.offset == chunk_offset) {
2362 memmove(ptr, ptr + len, array_size - (cur + len));
2363 array_size -= len;
2364 btrfs_set_super_sys_array_size(super_copy, array_size);
2365 } else {
2366 ptr += len;
2367 cur += len;
2368 }
2369 }
2370 return ret;
2371 }
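
/*
 * Layout note for the walk above: sys_chunk_array is a packed sequence
 * of (struct btrfs_disk_key, struct btrfs_chunk + inline stripes)
 * pairs:
 *
 *	[disk_key][chunk][stripe 0]..[stripe n-1][disk_key][chunk]...
 *
 * so each iteration advances by sizeof(*disk_key) plus
 * btrfs_chunk_item_size(num_stripes), and removing an entry is a
 * single memmove() over the tail of the array.
 */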
2372
2373 static int btrfs_relocate_chunk(struct btrfs_root *root,
2374 u64 chunk_tree, u64 chunk_objectid,
2375 u64 chunk_offset)
2376 {
2377 struct extent_map_tree *em_tree;
2378 struct btrfs_root *extent_root;
2379 struct btrfs_trans_handle *trans;
2380 struct extent_map *em;
2381 struct map_lookup *map;
2382 int ret;
2383 int i;
2384
2385 root = root->fs_info->chunk_root;
2386 extent_root = root->fs_info->extent_root;
2387 em_tree = &root->fs_info->mapping_tree.map_tree;
2388
2389 ret = btrfs_can_relocate(extent_root, chunk_offset);
2390 if (ret)
2391 return -ENOSPC;
2392
2393 /* step one, relocate all the extents inside this chunk */
2394 ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2395 if (ret)
2396 return ret;
2397
2398 trans = btrfs_start_transaction(root, 0);
2399 if (IS_ERR(trans)) {
2400 ret = PTR_ERR(trans);
2401 btrfs_std_error(root->fs_info, ret);
2402 return ret;
2403 }
2404
2405 lock_chunks(root);
2406
2407 /*
2408 * step two, delete the device extents and the
2409 * chunk tree entries
2410 */
2411 read_lock(&em_tree->lock);
2412 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2413 read_unlock(&em_tree->lock);
2414
2415 BUG_ON(!em || em->start > chunk_offset ||
2416 em->start + em->len < chunk_offset);
2417 map = (struct map_lookup *)em->bdev;
2418
2419 for (i = 0; i < map->num_stripes; i++) {
2420 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2421 map->stripes[i].physical);
2422 BUG_ON(ret);
2423
2424 if (map->stripes[i].dev) {
2425 ret = btrfs_update_device(trans, map->stripes[i].dev);
2426 BUG_ON(ret);
2427 }
2428 }
2429 ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2430 chunk_offset);
2431
2432 BUG_ON(ret);
2433
2434 trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2435
2436 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2437 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2438 BUG_ON(ret);
2439 }
2440
2441 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2442 BUG_ON(ret);
2443
2444 write_lock(&em_tree->lock);
2445 remove_extent_mapping(em_tree, em);
2446 write_unlock(&em_tree->lock);
2447
2448 kfree(map);
2449 em->bdev = NULL;
2450
2451 /* once for the tree */
2452 free_extent_map(em);
2453 /* once for us */
2454 free_extent_map(em);
2455
2456 unlock_chunks(root);
2457 btrfs_end_transaction(trans, root);
2458 return 0;
2459 }
2460
2461 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2462 {
2463 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2464 struct btrfs_path *path;
2465 struct extent_buffer *leaf;
2466 struct btrfs_chunk *chunk;
2467 struct btrfs_key key;
2468 struct btrfs_key found_key;
2469 u64 chunk_tree = chunk_root->root_key.objectid;
2470 u64 chunk_type;
2471 bool retried = false;
2472 int failed = 0;
2473 int ret;
2474
2475 path = btrfs_alloc_path();
2476 if (!path)
2477 return -ENOMEM;
2478
2479 again:
2480 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2481 key.offset = (u64)-1;
2482 key.type = BTRFS_CHUNK_ITEM_KEY;
2483
2484 while (1) {
2485 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2486 if (ret < 0)
2487 goto error;
2488 BUG_ON(ret == 0); /* Corruption */
2489
2490 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2491 key.type);
2492 if (ret < 0)
2493 goto error;
2494 if (ret > 0)
2495 break;
2496
2497 leaf = path->nodes[0];
2498 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2499
2500 chunk = btrfs_item_ptr(leaf, path->slots[0],
2501 struct btrfs_chunk);
2502 chunk_type = btrfs_chunk_type(leaf, chunk);
2503 btrfs_release_path(path);
2504
2505 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2506 ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2507 found_key.objectid,
2508 found_key.offset);
2509 if (ret == -ENOSPC)
2510 failed++;
2511 else if (ret)
2512 BUG();
2513 }
2514
2515 if (found_key.offset == 0)
2516 break;
2517 key.offset = found_key.offset - 1;
2518 }
2519 ret = 0;
2520 if (failed && !retried) {
2521 failed = 0;
2522 retried = true;
2523 goto again;
2524 } else if (failed && retried) {
2525 WARN_ON(1);
2526 ret = -ENOSPC;
2527 }
2528 error:
2529 btrfs_free_path(path);
2530 return ret;
2531 }
2532
2533 static int insert_balance_item(struct btrfs_root *root,
2534 struct btrfs_balance_control *bctl)
2535 {
2536 struct btrfs_trans_handle *trans;
2537 struct btrfs_balance_item *item;
2538 struct btrfs_disk_balance_args disk_bargs;
2539 struct btrfs_path *path;
2540 struct extent_buffer *leaf;
2541 struct btrfs_key key;
2542 int ret, err;
2543
2544 path = btrfs_alloc_path();
2545 if (!path)
2546 return -ENOMEM;
2547
2548 trans = btrfs_start_transaction(root, 0);
2549 if (IS_ERR(trans)) {
2550 btrfs_free_path(path);
2551 return PTR_ERR(trans);
2552 }
2553
2554 key.objectid = BTRFS_BALANCE_OBJECTID;
2555 key.type = BTRFS_BALANCE_ITEM_KEY;
2556 key.offset = 0;
2557
2558 ret = btrfs_insert_empty_item(trans, root, path, &key,
2559 sizeof(*item));
2560 if (ret)
2561 goto out;
2562
2563 leaf = path->nodes[0];
2564 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2565
2566 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2567
2568 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2569 btrfs_set_balance_data(leaf, item, &disk_bargs);
2570 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2571 btrfs_set_balance_meta(leaf, item, &disk_bargs);
2572 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2573 btrfs_set_balance_sys(leaf, item, &disk_bargs);
2574
2575 btrfs_set_balance_flags(leaf, item, bctl->flags);
2576
2577 btrfs_mark_buffer_dirty(leaf);
2578 out:
2579 btrfs_free_path(path);
2580 err = btrfs_commit_transaction(trans, root);
2581 if (err && !ret)
2582 ret = err;
2583 return ret;
2584 }
2585
2586 static int del_balance_item(struct btrfs_root *root)
2587 {
2588 struct btrfs_trans_handle *trans;
2589 struct btrfs_path *path;
2590 struct btrfs_key key;
2591 int ret, err;
2592
2593 path = btrfs_alloc_path();
2594 if (!path)
2595 return -ENOMEM;
2596
2597 trans = btrfs_start_transaction(root, 0);
2598 if (IS_ERR(trans)) {
2599 btrfs_free_path(path);
2600 return PTR_ERR(trans);
2601 }
2602
2603 key.objectid = BTRFS_BALANCE_OBJECTID;
2604 key.type = BTRFS_BALANCE_ITEM_KEY;
2605 key.offset = 0;
2606
2607 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2608 if (ret < 0)
2609 goto out;
2610 if (ret > 0) {
2611 ret = -ENOENT;
2612 goto out;
2613 }
2614
2615 ret = btrfs_del_item(trans, root, path);
2616 out:
2617 btrfs_free_path(path);
2618 err = btrfs_commit_transaction(trans, root);
2619 if (err && !ret)
2620 ret = err;
2621 return ret;
2622 }
2623
2624 /*
2625 * This is a heuristic used to reduce the number of chunks balanced on
2626 * resume after balance was interrupted.
2627 */
2628 static void update_balance_args(struct btrfs_balance_control *bctl)
2629 {
2630 /*
2631 * Turn on soft mode for chunk types that were being converted.
2632 */
2633 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2634 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2635 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2636 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2637 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2638 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2639
2640 /*
2641 * Turn on the usage filter if it is not already in use. The idea is
2642 * that chunks that we have already balanced should be
2643 * reasonably full. Don't do it for chunks that are being
2644 * converted - that will keep us from relocating unconverted
2645 * (albeit full) chunks.
2646 */
2647 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2648 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2649 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2650 bctl->data.usage = 90;
2651 }
2652 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2653 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2654 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2655 bctl->sys.usage = 90;
2656 }
2657 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2658 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2659 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2660 bctl->meta.usage = 90;
2661 }
2662 }
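
/*
 * Example: resuming a plain data balance adds "usage=90" to the data
 * args, so nearly-empty chunks are still relocated but chunks filled
 * by the pre-interruption passes are skipped. Resuming a balance
 * started with -dconvert=raid1 instead adds "soft", so chunks that
 * already carry the raid1 profile are not relocated a second time.
 */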
2663
2664 /*
2665 * Should be called with both balance and volume mutexes held to
2666 * serialize other volume operations (add_dev/rm_dev/resize) with
2667 * restriper. Same goes for unset_balance_control.
2668 */
2669 static void set_balance_control(struct btrfs_balance_control *bctl)
2670 {
2671 struct btrfs_fs_info *fs_info = bctl->fs_info;
2672
2673 BUG_ON(fs_info->balance_ctl);
2674
2675 spin_lock(&fs_info->balance_lock);
2676 fs_info->balance_ctl = bctl;
2677 spin_unlock(&fs_info->balance_lock);
2678 }
2679
2680 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2681 {
2682 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2683
2684 BUG_ON(!fs_info->balance_ctl);
2685
2686 spin_lock(&fs_info->balance_lock);
2687 fs_info->balance_ctl = NULL;
2688 spin_unlock(&fs_info->balance_lock);
2689
2690 kfree(bctl);
2691 }
2692
2693 /*
2694 * Balance filters. Return 1 if chunk should be filtered out
2695 * (should not be balanced).
2696 */
2697 static int chunk_profiles_filter(u64 chunk_type,
2698 struct btrfs_balance_args *bargs)
2699 {
2700 chunk_type = chunk_to_extended(chunk_type) &
2701 BTRFS_EXTENDED_PROFILE_MASK;
2702
2703 if (bargs->profiles & chunk_type)
2704 return 0;
2705
2706 return 1;
2707 }
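
/*
 * Note: chunk_to_extended() maps the "single" profile, which has no
 * profile bits set on disk, to BTRFS_AVAIL_ALLOC_BIT_SINGLE, so that
 * single chunks can be matched against bargs->profiles like any other
 * profile bit.
 */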
2708
2709 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2710 struct btrfs_balance_args *bargs)
2711 {
2712 struct btrfs_block_group_cache *cache;
2713 u64 chunk_used, user_thresh;
2714 int ret = 1;
2715
2716 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2717 chunk_used = btrfs_block_group_used(&cache->item);
2718
2719 if (bargs->usage == 0)
2720 user_thresh = 1;
2721 else if (bargs->usage > 100)
2722 user_thresh = cache->key.offset;
2723 else
2724 user_thresh = div_factor_fine(cache->key.offset,
2725 bargs->usage);
2726
2727 if (chunk_used < user_thresh)
2728 ret = 0;
2729
2730 btrfs_put_block_group(cache);
2731 return ret;
2732 }
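
/*
 * Worked example: with bargs->usage == 90 and a 1GiB chunk,
 * user_thresh = div_factor_fine(1073741824, 90) = 966367641 bytes, so
 * a chunk less than ~90% full is relocated and a fuller one is
 * filtered out. usage == 0 relocates only completely empty chunks;
 * usage > 100 relocates everything that is not completely full.
 */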
2733
2734 static int chunk_devid_filter(struct extent_buffer *leaf,
2735 struct btrfs_chunk *chunk,
2736 struct btrfs_balance_args *bargs)
2737 {
2738 struct btrfs_stripe *stripe;
2739 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2740 int i;
2741
2742 for (i = 0; i < num_stripes; i++) {
2743 stripe = btrfs_stripe_nr(chunk, i);
2744 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2745 return 0;
2746 }
2747
2748 return 1;
2749 }
2750
2751 /* [pstart, pend) */
2752 static int chunk_drange_filter(struct extent_buffer *leaf,
2753 struct btrfs_chunk *chunk,
2754 u64 chunk_offset,
2755 struct btrfs_balance_args *bargs)
2756 {
2757 struct btrfs_stripe *stripe;
2758 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2759 u64 stripe_offset;
2760 u64 stripe_length;
2761 int factor;
2762 int i;
2763
2764 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2765 return 0;
2766
2767 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2768 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
2769 factor = num_stripes / 2;
2770 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
2771 factor = num_stripes - 1;
2772 } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
2773 factor = num_stripes - 2;
2774 } else {
2775 factor = num_stripes;
2776 }
2777
2778 for (i = 0; i < num_stripes; i++) {
2779 stripe = btrfs_stripe_nr(chunk, i);
2780 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2781 continue;
2782
2783 stripe_offset = btrfs_stripe_offset(leaf, stripe);
2784 stripe_length = btrfs_chunk_length(leaf, chunk);
2785 do_div(stripe_length, factor);
2786
2787 if (stripe_offset < bargs->pend &&
2788 stripe_offset + stripe_length > bargs->pstart)
2789 return 0;
2790 }
2791
2792 return 1;
2793 }
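
/*
 * Example of the factor math above: a RAID10 chunk striped over four
 * devices keeps two copies, so each stripe holds chunk_length / 2
 * bytes (factor = num_stripes / 2 = 2). The stripe on bargs->devid
 * therefore covers [stripe_offset, stripe_offset + chunk_length / 2)
 * on that device, and the chunk stays in the balance only if this
 * range overlaps [pstart, pend).
 */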
2794
2795 /* [vstart, vend) */
2796 static int chunk_vrange_filter(struct extent_buffer *leaf,
2797 struct btrfs_chunk *chunk,
2798 u64 chunk_offset,
2799 struct btrfs_balance_args *bargs)
2800 {
2801 if (chunk_offset < bargs->vend &&
2802 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2803 /* at least part of the chunk is inside this vrange */
2804 return 0;
2805
2806 return 1;
2807 }
2808
2809 static int chunk_soft_convert_filter(u64 chunk_type,
2810 struct btrfs_balance_args *bargs)
2811 {
2812 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2813 return 0;
2814
2815 chunk_type = chunk_to_extended(chunk_type) &
2816 BTRFS_EXTENDED_PROFILE_MASK;
2817
2818 if (bargs->target == chunk_type)
2819 return 1;
2820
2821 return 0;
2822 }
2823
2824 static int should_balance_chunk(struct btrfs_root *root,
2825 struct extent_buffer *leaf,
2826 struct btrfs_chunk *chunk, u64 chunk_offset)
2827 {
2828 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2829 struct btrfs_balance_args *bargs = NULL;
2830 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2831
2832 /* type filter */
2833 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2834 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2835 return 0;
2836 }
2837
2838 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2839 bargs = &bctl->data;
2840 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2841 bargs = &bctl->sys;
2842 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2843 bargs = &bctl->meta;
2844
2845 /* profiles filter */
2846 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2847 chunk_profiles_filter(chunk_type, bargs)) {
2848 return 0;
2849 }
2850
2851 /* usage filter */
2852 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2853 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2854 return 0;
2855 }
2856
2857 /* devid filter */
2858 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2859 chunk_devid_filter(leaf, chunk, bargs)) {
2860 return 0;
2861 }
2862
2863 /* drange filter, makes sense only with devid filter */
2864 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2865 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2866 return 0;
2867 }
2868
2869 /* vrange filter */
2870 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2871 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2872 return 0;
2873 }
2874
2875 /* soft profile changing mode */
2876 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2877 chunk_soft_convert_filter(chunk_type, bargs)) {
2878 return 0;
2879 }
2880
2881 return 1;
2882 }
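
/*
 * The filters above are purely restrictive: a chunk of an enabled type
 * is balanced unless some configured filter rejects it, and filters
 * that were not requested in bargs->flags are skipped entirely.
 */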
2883
2884 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2885 {
2886 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2887 struct btrfs_root *chunk_root = fs_info->chunk_root;
2888 struct btrfs_root *dev_root = fs_info->dev_root;
2889 struct list_head *devices;
2890 struct btrfs_device *device;
2891 u64 old_size;
2892 u64 size_to_free;
2893 struct btrfs_chunk *chunk;
2894 struct btrfs_path *path;
2895 struct btrfs_key key;
2896 struct btrfs_key found_key;
2897 struct btrfs_trans_handle *trans;
2898 struct extent_buffer *leaf;
2899 int slot;
2900 int ret;
2901 int enospc_errors = 0;
2902 bool counting = true;
2903
2904 /* step one, make some room on all the devices */
2905 devices = &fs_info->fs_devices->devices;
2906 list_for_each_entry(device, devices, dev_list) {
2907 old_size = device->total_bytes;
2908 size_to_free = div_factor(old_size, 1);
2909 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2910 if (!device->writeable ||
2911 device->total_bytes - device->bytes_used > size_to_free ||
2912 device->is_tgtdev_for_dev_replace)
2913 continue;
2914
2915 ret = btrfs_shrink_device(device, old_size - size_to_free);
2916 if (ret == -ENOSPC)
2917 break;
2918 BUG_ON(ret);
2919
2920 trans = btrfs_start_transaction(dev_root, 0);
2921 BUG_ON(IS_ERR(trans));
2922
2923 ret = btrfs_grow_device(trans, device, old_size);
2924 BUG_ON(ret);
2925
2926 btrfs_end_transaction(trans, dev_root);
2927 }
2928
2929 /* step two, relocate all the chunks */
2930 path = btrfs_alloc_path();
2931 if (!path) {
2932 ret = -ENOMEM;
2933 goto error;
2934 }
2935
2936 /* zero out stat counters */
2937 spin_lock(&fs_info->balance_lock);
2938 memset(&bctl->stat, 0, sizeof(bctl->stat));
2939 spin_unlock(&fs_info->balance_lock);
2940 again:
2941 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2942 key.offset = (u64)-1;
2943 key.type = BTRFS_CHUNK_ITEM_KEY;
2944
2945 while (1) {
2946 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2947 atomic_read(&fs_info->balance_cancel_req)) {
2948 ret = -ECANCELED;
2949 goto error;
2950 }
2951
2952 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2953 if (ret < 0)
2954 goto error;
2955
2956 /*
2957 * this shouldn't happen, it means the last relocate
2958 * failed
2959 */
2960 if (ret == 0)
2961 BUG(); /* FIXME break ? */
2962
2963 ret = btrfs_previous_item(chunk_root, path, 0,
2964 BTRFS_CHUNK_ITEM_KEY);
2965 if (ret) {
2966 ret = 0;
2967 break;
2968 }
2969
2970 leaf = path->nodes[0];
2971 slot = path->slots[0];
2972 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2973
2974 if (found_key.objectid != key.objectid)
2975 break;
2976
2977 /* chunk zero is special */
2978 if (found_key.offset == 0)
2979 break;
2980
2981 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2982
2983 if (!counting) {
2984 spin_lock(&fs_info->balance_lock);
2985 bctl->stat.considered++;
2986 spin_unlock(&fs_info->balance_lock);
2987 }
2988
2989 ret = should_balance_chunk(chunk_root, leaf, chunk,
2990 found_key.offset);
2991 btrfs_release_path(path);
2992 if (!ret)
2993 goto loop;
2994
2995 if (counting) {
2996 spin_lock(&fs_info->balance_lock);
2997 bctl->stat.expected++;
2998 spin_unlock(&fs_info->balance_lock);
2999 goto loop;
3000 }
3001
3002 ret = btrfs_relocate_chunk(chunk_root,
3003 chunk_root->root_key.objectid,
3004 found_key.objectid,
3005 found_key.offset);
3006 if (ret && ret != -ENOSPC)
3007 goto error;
3008 if (ret == -ENOSPC) {
3009 enospc_errors++;
3010 } else {
3011 spin_lock(&fs_info->balance_lock);
3012 bctl->stat.completed++;
3013 spin_unlock(&fs_info->balance_lock);
3014 }
3015 loop:
3016 key.offset = found_key.offset - 1;
3017 }
3018
3019 if (counting) {
3020 btrfs_release_path(path);
3021 counting = false;
3022 goto again;
3023 }
3024 error:
3025 btrfs_free_path(path);
3026 if (enospc_errors) {
3027 printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
3028 enospc_errors);
3029 if (!ret)
3030 ret = -ENOSPC;
3031 }
3032
3033 return ret;
3034 }
3035
3036 /**
3037 * alloc_profile_is_valid - see if a given profile is valid and reduced
3038 * @flags: profile to validate
3039 * @extended: if true @flags is treated as an extended profile
3040 */
3041 static int alloc_profile_is_valid(u64 flags, int extended)
3042 {
3043 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3044 BTRFS_BLOCK_GROUP_PROFILE_MASK);
3045
3046 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3047
3048 /* 1) check that all other bits are zeroed */
3049 if (flags & ~mask)
3050 return 0;
3051
3052 /* 2) see if profile is reduced */
3053 if (flags == 0)
3054 return !extended; /* "0" is valid for usual profiles */
3055
3056 /* true if exactly one bit set */
3057 return (flags & (flags - 1)) == 0;
3058 }
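
/*
 * Example: flags = BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1
 * is rejected as not reduced, since 0x18 & 0x17 == 0x10 != 0, while a
 * single profile bit such as BTRFS_BLOCK_GROUP_RAID1 (0x10) satisfies
 * the power-of-two test: 0x10 & 0x0f == 0.
 */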
3059
3060 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3061 {
3062 /* cancel requested || normal exit path */
3063 return atomic_read(&fs_info->balance_cancel_req) ||
3064 (atomic_read(&fs_info->balance_pause_req) == 0 &&
3065 atomic_read(&fs_info->balance_cancel_req) == 0);
3066 }
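
/*
 * Equivalently: this is false only while a pause is requested and no
 * cancel is pending, so a paused balance keeps its balance_ctl around
 * for a later resume, while both cancellation and normal completion
 * tear it down.
 */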
3067
3068 static void __cancel_balance(struct btrfs_fs_info *fs_info)
3069 {
3070 int ret;
3071
3072 unset_balance_control(fs_info);
3073 ret = del_balance_item(fs_info->tree_root);
3074 if (ret)
3075 btrfs_std_error(fs_info, ret);
3076
3077 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3078 }
3079
3080 /*
3081 * Should be called with both balance and volume mutexes held
3082 */
3083 int btrfs_balance(struct btrfs_balance_control *bctl,
3084 struct btrfs_ioctl_balance_args *bargs)
3085 {
3086 struct btrfs_fs_info *fs_info = bctl->fs_info;
3087 u64 allowed;
3088 int mixed = 0;
3089 int ret;
3090 u64 num_devices;
3091 unsigned seq;
3092
3093 if (btrfs_fs_closing(fs_info) ||
3094 atomic_read(&fs_info->balance_pause_req) ||
3095 atomic_read(&fs_info->balance_cancel_req)) {
3096 ret = -EINVAL;
3097 goto out;
3098 }
3099
3100 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3101 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3102 mixed = 1;
3103
3104 /*
3105 * In case of mixed groups both data and meta should be picked,
3106 * and identical options should be given for both of them.
3107 */
3108 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3109 if (mixed && (bctl->flags & allowed)) {
3110 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3111 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3112 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3113 printk(KERN_ERR "btrfs: with mixed groups data and "
3114 "metadata balance options must be the same\n");
3115 ret = -EINVAL;
3116 goto out;
3117 }
3118 }
3119
3120 num_devices = fs_info->fs_devices->num_devices;
3121 btrfs_dev_replace_lock(&fs_info->dev_replace);
3122 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3123 BUG_ON(num_devices < 1);
3124 num_devices--;
3125 }
3126 btrfs_dev_replace_unlock(&fs_info->dev_replace);
3127 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3128 if (num_devices == 1)
3129 allowed |= BTRFS_BLOCK_GROUP_DUP;
3130 else if (num_devices > 1)
3131 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3132 if (num_devices > 2)
3133 allowed |= BTRFS_BLOCK_GROUP_RAID5;
3134 if (num_devices > 3)
3135 allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3136 BTRFS_BLOCK_GROUP_RAID6);
3137 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3138 (!alloc_profile_is_valid(bctl->data.target, 1) ||
3139 (bctl->data.target & ~allowed))) {
3140 printk(KERN_ERR "btrfs: unable to start balance with target "
3141 "data profile %llu\n",
3142 (unsigned long long)bctl->data.target);
3143 ret = -EINVAL;
3144 goto out;
3145 }
3146 if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3147 (!alloc_profile_is_valid(bctl->meta.target, 1) ||
3148 (bctl->meta.target & ~allowed))) {
3149 printk(KERN_ERR "btrfs: unable to start balance with target "
3150 "metadata profile %llu\n",
3151 (unsigned long long)bctl->meta.target);
3152 ret = -EINVAL;
3153 goto out;
3154 }
3155 if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3156 (!alloc_profile_is_valid(bctl->sys.target, 1) ||
3157 (bctl->sys.target & ~allowed))) {
3158 printk(KERN_ERR "btrfs: unable to start balance with target "
3159 "system profile %llu\n",
3160 (unsigned long long)bctl->sys.target);
3161 ret = -EINVAL;
3162 goto out;
3163 }
3164
3165 /* allow dup'ed data chunks only in mixed mode */
3166 if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3167 (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
3168 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
3169 ret = -EINVAL;
3170 goto out;
3171 }
3172
3173 /* allow reducing meta or sys integrity only if force is set */
3174 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3175 BTRFS_BLOCK_GROUP_RAID10 |
3176 BTRFS_BLOCK_GROUP_RAID5 |
3177 BTRFS_BLOCK_GROUP_RAID6;
3178 do {
3179 seq = read_seqbegin(&fs_info->profiles_lock);
3180
3181 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3182 (fs_info->avail_system_alloc_bits & allowed) &&
3183 !(bctl->sys.target & allowed)) ||
3184 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3185 (fs_info->avail_metadata_alloc_bits & allowed) &&
3186 !(bctl->meta.target & allowed))) {
3187 if (bctl->flags & BTRFS_BALANCE_FORCE) {
3188 printk(KERN_INFO "btrfs: force reducing metadata "
3189 "integrity\n");
3190 } else {
3191 printk(KERN_ERR "btrfs: balance will reduce metadata "
3192 "integrity, use force if you want this\n");
3193 ret = -EINVAL;
3194 goto out;
3195 }
3196 }
3197 } while (read_seqretry(&fs_info->profiles_lock, seq));
3198
3199 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3200 int num_tolerated_disk_barrier_failures;
3201 u64 target = bctl->sys.target;
3202
3203 num_tolerated_disk_barrier_failures =
3204 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3205 if (num_tolerated_disk_barrier_failures > 0 &&
3206 (target &
3207 (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3208 BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
3209 num_tolerated_disk_barrier_failures = 0;
3210 else if (num_tolerated_disk_barrier_failures > 1 &&
3211 (target &
3212 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
3213 num_tolerated_disk_barrier_failures = 1;
3214
3215 fs_info->num_tolerated_disk_barrier_failures =
3216 num_tolerated_disk_barrier_failures;
3217 }
3218
3219 ret = insert_balance_item(fs_info->tree_root, bctl);
3220 if (ret && ret != -EEXIST)
3221 goto out;
3222
3223 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3224 BUG_ON(ret == -EEXIST);
3225 set_balance_control(bctl);
3226 } else {
3227 BUG_ON(ret != -EEXIST);
3228 spin_lock(&fs_info->balance_lock);
3229 update_balance_args(bctl);
3230 spin_unlock(&fs_info->balance_lock);
3231 }
3232
3233 atomic_inc(&fs_info->balance_running);
3234 mutex_unlock(&fs_info->balance_mutex);
3235
3236 ret = __btrfs_balance(fs_info);
3237
3238 mutex_lock(&fs_info->balance_mutex);
3239 atomic_dec(&fs_info->balance_running);
3240
3241 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3242 fs_info->num_tolerated_disk_barrier_failures =
3243 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3244 }
3245
3246 if (bargs) {
3247 memset(bargs, 0, sizeof(*bargs));
3248 update_ioctl_balance_args(fs_info, 0, bargs);
3249 }
3250
3251 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3252 balance_need_close(fs_info)) {
3253 __cancel_balance(fs_info);
3254 }
3255
3256 wake_up(&fs_info->balance_wait_q);
3257
3258 return ret;
3259 out:
3260 if (bctl->flags & BTRFS_BALANCE_RESUME)
3261 __cancel_balance(fs_info);
3262 else {
3263 kfree(bctl);
3264 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3265 }
3266 return ret;
3267 }
3268
3269 static int balance_kthread(void *data)
3270 {
3271 struct btrfs_fs_info *fs_info = data;
3272 int ret = 0;
3273
3274 mutex_lock(&fs_info->volume_mutex);
3275 mutex_lock(&fs_info->balance_mutex);
3276
3277 if (fs_info->balance_ctl) {
3278 printk(KERN_INFO "btrfs: continuing balance\n");
3279 ret = btrfs_balance(fs_info->balance_ctl, NULL);
3280 }
3281
3282 mutex_unlock(&fs_info->balance_mutex);
3283 mutex_unlock(&fs_info->volume_mutex);
3284
3285 return ret;
3286 }
3287
3288 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3289 {
3290 struct task_struct *tsk;
3291
3292 spin_lock(&fs_info->balance_lock);
3293 if (!fs_info->balance_ctl) {
3294 spin_unlock(&fs_info->balance_lock);
3295 return 0;
3296 }
3297 spin_unlock(&fs_info->balance_lock);
3298
3299 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3300 printk(KERN_INFO "btrfs: force skipping balance\n");
3301 return 0;
3302 }
3303
3304 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3305 return PTR_RET(tsk);
3306 }
3307
3308 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3309 {
3310 struct btrfs_balance_control *bctl;
3311 struct btrfs_balance_item *item;
3312 struct btrfs_disk_balance_args disk_bargs;
3313 struct btrfs_path *path;
3314 struct extent_buffer *leaf;
3315 struct btrfs_key key;
3316 int ret;
3317
3318 path = btrfs_alloc_path();
3319 if (!path)
3320 return -ENOMEM;
3321
3322 key.objectid = BTRFS_BALANCE_OBJECTID;
3323 key.type = BTRFS_BALANCE_ITEM_KEY;
3324 key.offset = 0;
3325
3326 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3327 if (ret < 0)
3328 goto out;
3329 if (ret > 0) { /* ret = -ENOENT; */
3330 ret = 0;
3331 goto out;
3332 }
3333
3334 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3335 if (!bctl) {
3336 ret = -ENOMEM;
3337 goto out;
3338 }
3339
3340 leaf = path->nodes[0];
3341 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3342
3343 bctl->fs_info = fs_info;
3344 bctl->flags = btrfs_balance_flags(leaf, item);
3345 bctl->flags |= BTRFS_BALANCE_RESUME;
3346
3347 btrfs_balance_data(leaf, item, &disk_bargs);
3348 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3349 btrfs_balance_meta(leaf, item, &disk_bargs);
3350 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3351 btrfs_balance_sys(leaf, item, &disk_bargs);
3352 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3353
3354 WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3355
3356 mutex_lock(&fs_info->volume_mutex);
3357 mutex_lock(&fs_info->balance_mutex);
3358
3359 set_balance_control(bctl);
3360
3361 mutex_unlock(&fs_info->balance_mutex);
3362 mutex_unlock(&fs_info->volume_mutex);
3363 out:
3364 btrfs_free_path(path);
3365 return ret;
3366 }
3367
3368 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3369 {
3370 int ret = 0;
3371
3372 mutex_lock(&fs_info->balance_mutex);
3373 if (!fs_info->balance_ctl) {
3374 mutex_unlock(&fs_info->balance_mutex);
3375 return -ENOTCONN;
3376 }
3377
3378 if (atomic_read(&fs_info->balance_running)) {
3379 atomic_inc(&fs_info->balance_pause_req);
3380 mutex_unlock(&fs_info->balance_mutex);
3381
3382 wait_event(fs_info->balance_wait_q,
3383 atomic_read(&fs_info->balance_running) == 0);
3384
3385 mutex_lock(&fs_info->balance_mutex);
3386 /* we are good with balance_ctl ripped off from under us */
3387 BUG_ON(atomic_read(&fs_info->balance_running));
3388 atomic_dec(&fs_info->balance_pause_req);
3389 } else {
3390 ret = -ENOTCONN;
3391 }
3392
3393 mutex_unlock(&fs_info->balance_mutex);
3394 return ret;
3395 }
3396
3397 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3398 {
3399 mutex_lock(&fs_info->balance_mutex);
3400 if (!fs_info->balance_ctl) {
3401 mutex_unlock(&fs_info->balance_mutex);
3402 return -ENOTCONN;
3403 }
3404
3405 atomic_inc(&fs_info->balance_cancel_req);
3406 /*
3407 * if we are running, just wait and return; the balance item is
3408 * deleted in btrfs_balance() in this case
3409 */
3410 if (atomic_read(&fs_info->balance_running)) {
3411 mutex_unlock(&fs_info->balance_mutex);
3412 wait_event(fs_info->balance_wait_q,
3413 atomic_read(&fs_info->balance_running) == 0);
3414 mutex_lock(&fs_info->balance_mutex);
3415 } else {
3416 /* __cancel_balance needs volume_mutex */
3417 mutex_unlock(&fs_info->balance_mutex);
3418 mutex_lock(&fs_info->volume_mutex);
3419 mutex_lock(&fs_info->balance_mutex);
3420
3421 if (fs_info->balance_ctl)
3422 __cancel_balance(fs_info);
3423
3424 mutex_unlock(&fs_info->volume_mutex);
3425 }
3426
3427 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3428 atomic_dec(&fs_info->balance_cancel_req);
3429 mutex_unlock(&fs_info->balance_mutex);
3430 return 0;
3431 }
3432
3433 static int btrfs_uuid_scan_kthread(void *data)
3434 {
3435 struct btrfs_fs_info *fs_info = data;
3436 struct btrfs_root *root = fs_info->tree_root;
3437 struct btrfs_key key;
3438 struct btrfs_key max_key;
3439 struct btrfs_path *path = NULL;
3440 int ret = 0;
3441 struct extent_buffer *eb;
3442 int slot;
3443 struct btrfs_root_item root_item;
3444 u32 item_size;
3445 struct btrfs_trans_handle *trans;
3446
3447 path = btrfs_alloc_path();
3448 if (!path) {
3449 ret = -ENOMEM;
3450 goto out;
3451 }
3452
3453 key.objectid = 0;
3454 key.type = BTRFS_ROOT_ITEM_KEY;
3455 key.offset = 0;
3456
3457 max_key.objectid = (u64)-1;
3458 max_key.type = BTRFS_ROOT_ITEM_KEY;
3459 max_key.offset = (u64)-1;
3460
3461 path->keep_locks = 1;
3462
3463 while (1) {
3464 ret = btrfs_search_forward(root, &key, &max_key, path, 0);
3465 if (ret) {
3466 if (ret > 0)
3467 ret = 0;
3468 break;
3469 }
3470
3471 if (key.type != BTRFS_ROOT_ITEM_KEY ||
3472 (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
3473 key.objectid != BTRFS_FS_TREE_OBJECTID) ||
3474 key.objectid > BTRFS_LAST_FREE_OBJECTID)
3475 goto skip;
3476
3477 eb = path->nodes[0];
3478 slot = path->slots[0];
3479 item_size = btrfs_item_size_nr(eb, slot);
3480 if (item_size < sizeof(root_item))
3481 goto skip;
3482
3483 trans = NULL;
3484 read_extent_buffer(eb, &root_item,
3485 btrfs_item_ptr_offset(eb, slot),
3486 (int)sizeof(root_item));
3487 if (btrfs_root_refs(&root_item) == 0)
3488 goto skip;
3489 if (!btrfs_is_empty_uuid(root_item.uuid)) {
3490 /*
3491 * 1 - subvol uuid item
3492 * 1 - received_subvol uuid item
3493 */
3494 trans = btrfs_start_transaction(fs_info->uuid_root, 2);
3495 if (IS_ERR(trans)) {
3496 ret = PTR_ERR(trans);
3497 break;
3498 }
3499 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3500 root_item.uuid,
3501 BTRFS_UUID_KEY_SUBVOL,
3502 key.objectid);
3503 if (ret < 0) {
3504 pr_warn("btrfs: uuid_tree_add failed %d\n",
3505 ret);
3506 btrfs_end_transaction(trans,
3507 fs_info->uuid_root);
3508 break;
3509 }
3510 }
3511
3512 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
3513 if (!trans) {
3514 /* 1 - received_subvol uuid item */
3515 trans = btrfs_start_transaction(
3516 fs_info->uuid_root, 1);
3517 if (IS_ERR(trans)) {
3518 ret = PTR_ERR(trans);
3519 break;
3520 }
3521 }
3522 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3523 root_item.received_uuid,
3524 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
3525 key.objectid);
3526 if (ret < 0) {
3527 pr_warn("btrfs: uuid_tree_add failed %d\n",
3528 ret);
3529 btrfs_end_transaction(trans,
3530 fs_info->uuid_root);
3531 break;
3532 }
3533 }
3534
3535 if (trans) {
3536 ret = btrfs_end_transaction(trans, fs_info->uuid_root);
3537 if (ret)
3538 break;
3539 }
3540
3541 skip:
3542 btrfs_release_path(path);
3543 if (key.offset < (u64)-1) {
3544 key.offset++;
3545 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
3546 key.offset = 0;
3547 key.type = BTRFS_ROOT_ITEM_KEY;
3548 } else if (key.objectid < (u64)-1) {
3549 key.offset = 0;
3550 key.type = BTRFS_ROOT_ITEM_KEY;
3551 key.objectid++;
3552 } else {
3553 break;
3554 }
3555 cond_resched();
3556 }
3557
3558 out:
3559 btrfs_free_path(path);
3560 if (ret)
3561 pr_warn("btrfs: btrfs_uuid_scan_kthread failed %d\n", ret);
3562 else
3563 fs_info->update_uuid_tree_gen = 1;
3564 up(&fs_info->uuid_tree_rescan_sem);
3565 return 0;
3566 }
3567
3568 /*
3569 * Callback for btrfs_uuid_tree_iterate().
3570 * returns:
3571 * 0 check succeeded, the entry is not outdated.
3572 * < 0 if an error occurred.
3573 * > 0 if the check failed, which means the caller shall remove the entry.
3574 */
3575 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
3576 u8 *uuid, u8 type, u64 subid)
3577 {
3578 struct btrfs_key key;
3579 int ret = 0;
3580 struct btrfs_root *subvol_root;
3581
3582 if (type != BTRFS_UUID_KEY_SUBVOL &&
3583 type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
3584 goto out;
3585
3586 key.objectid = subid;
3587 key.type = BTRFS_ROOT_ITEM_KEY;
3588 key.offset = (u64)-1;
3589 subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
3590 if (IS_ERR(subvol_root)) {
3591 ret = PTR_ERR(subvol_root);
3592 if (ret == -ENOENT)
3593 ret = 1;
3594 goto out;
3595 }
3596
3597 switch (type) {
3598 case BTRFS_UUID_KEY_SUBVOL:
3599 if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
3600 ret = 1;
3601 break;
3602 case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
3603 if (memcmp(uuid, subvol_root->root_item.received_uuid,
3604 BTRFS_UUID_SIZE))
3605 ret = 1;
3606 break;
3607 }
3608
3609 out:
3610 return ret;
3611 }
3612
3613 static int btrfs_uuid_rescan_kthread(void *data)
3614 {
3615 struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
3616 int ret;
3617
3618 /*
3619 * 1st step is to iterate through the existing UUID tree and
3620 * to delete all entries that contain outdated data.
3621 * 2nd step is to add all missing entries to the UUID tree.
3622 */
3623 ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
3624 if (ret < 0) {
3625 pr_warn("btrfs: iterating uuid_tree failed %d\n", ret);
3626 up(&fs_info->uuid_tree_rescan_sem);
3627 return ret;
3628 }
3629 return btrfs_uuid_scan_kthread(data);
3630 }
3631
3632 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
3633 {
3634 struct btrfs_trans_handle *trans;
3635 struct btrfs_root *tree_root = fs_info->tree_root;
3636 struct btrfs_root *uuid_root;
3637 struct task_struct *task;
3638 int ret;
3639
3640 /*
3641 * 1 - root node
3642 * 1 - root item
3643 */
3644 trans = btrfs_start_transaction(tree_root, 2);
3645 if (IS_ERR(trans))
3646 return PTR_ERR(trans);
3647
3648 uuid_root = btrfs_create_tree(trans, fs_info,
3649 BTRFS_UUID_TREE_OBJECTID);
3650 if (IS_ERR(uuid_root)) {
3651 btrfs_abort_transaction(trans, tree_root, PTR_ERR(uuid_root));
3652 btrfs_end_transaction(trans, tree_root);
3653 return PTR_ERR(uuid_root);
3654 }
3655
3656 fs_info->uuid_root = uuid_root;
3657
3658 ret = btrfs_commit_transaction(trans, tree_root);
3659 if (ret)
3660 return ret;
3661
3662 down(&fs_info->uuid_tree_rescan_sem);
3663 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
3664 if (IS_ERR(task)) {
3665 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
3666 pr_warn("btrfs: failed to start uuid_scan task\n");
3667 up(&fs_info->uuid_tree_rescan_sem);
3668 return PTR_ERR(task);
3669 }
3670
3671 return 0;
3672 }
3673
3674 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
3675 {
3676 struct task_struct *task;
3677
3678 down(&fs_info->uuid_tree_rescan_sem);
3679 task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
3680 if (IS_ERR(task)) {
3681 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
3682 pr_warn("btrfs: failed to start uuid_rescan task\n");
3683 up(&fs_info->uuid_tree_rescan_sem);
3684 return PTR_ERR(task);
3685 }
3686
3687 return 0;
3688 }
3689
3690 /*
3691 * shrinking a device means finding all of the device extents past
3692 * the new size, and then following the back refs to the chunks.
3693 * The chunk relocation code actually frees the device extents.
3694 */
3695 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3696 {
3697 struct btrfs_trans_handle *trans;
3698 struct btrfs_root *root = device->dev_root;
3699 struct btrfs_dev_extent *dev_extent = NULL;
3700 struct btrfs_path *path;
3701 u64 length;
3702 u64 chunk_tree;
3703 u64 chunk_objectid;
3704 u64 chunk_offset;
3705 int ret;
3706 int slot;
3707 int failed = 0;
3708 bool retried = false;
3709 struct extent_buffer *l;
3710 struct btrfs_key key;
3711 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3712 u64 old_total = btrfs_super_total_bytes(super_copy);
3713 u64 old_size = device->total_bytes;
3714 u64 diff = device->total_bytes - new_size;
3715
3716 if (device->is_tgtdev_for_dev_replace)
3717 return -EINVAL;
3718
3719 path = btrfs_alloc_path();
3720 if (!path)
3721 return -ENOMEM;
3722
3723 path->reada = 2;
3724
3725 lock_chunks(root);
3726
3727 device->total_bytes = new_size;
3728 if (device->writeable) {
3729 device->fs_devices->total_rw_bytes -= diff;
3730 spin_lock(&root->fs_info->free_chunk_lock);
3731 root->fs_info->free_chunk_space -= diff;
3732 spin_unlock(&root->fs_info->free_chunk_lock);
3733 }
3734 unlock_chunks(root);
3735
3736 again:
3737 key.objectid = device->devid;
3738 key.offset = (u64)-1;
3739 key.type = BTRFS_DEV_EXTENT_KEY;
3740
3741 do {
3742 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3743 if (ret < 0)
3744 goto done;
3745
3746 ret = btrfs_previous_item(root, path, 0, key.type);
3747 if (ret < 0)
3748 goto done;
3749 if (ret) {
3750 ret = 0;
3751 btrfs_release_path(path);
3752 break;
3753 }
3754
3755 l = path->nodes[0];
3756 slot = path->slots[0];
3757 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3758
3759 if (key.objectid != device->devid) {
3760 btrfs_release_path(path);
3761 break;
3762 }
3763
3764 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3765 length = btrfs_dev_extent_length(l, dev_extent);
3766
3767 if (key.offset + length <= new_size) {
3768 btrfs_release_path(path);
3769 break;
3770 }
3771
3772 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3773 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3774 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3775 btrfs_release_path(path);
3776
3777 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3778 chunk_offset);
3779 if (ret && ret != -ENOSPC)
3780 goto done;
3781 if (ret == -ENOSPC)
3782 failed++;
3783 } while (key.offset-- > 0);
3784
3785 if (failed && !retried) {
3786 failed = 0;
3787 retried = true;
3788 goto again;
3789 } else if (failed && retried) {
3790 ret = -ENOSPC;
3791 lock_chunks(root);
3792
3793 device->total_bytes = old_size;
3794 if (device->writeable)
3795 device->fs_devices->total_rw_bytes += diff;
3796 spin_lock(&root->fs_info->free_chunk_lock);
3797 root->fs_info->free_chunk_space += diff;
3798 spin_unlock(&root->fs_info->free_chunk_lock);
3799 unlock_chunks(root);
3800 goto done;
3801 }
3802
3803 /* Shrinking succeeded, else we would be at "done". */
3804 trans = btrfs_start_transaction(root, 0);
3805 if (IS_ERR(trans)) {
3806 ret = PTR_ERR(trans);
3807 goto done;
3808 }
3809
3810 lock_chunks(root);
3811
3812 device->disk_total_bytes = new_size;
3813 /* Now btrfs_update_device() will change the on-disk size. */
3814 ret = btrfs_update_device(trans, device);
3815 if (ret) {
3816 unlock_chunks(root);
3817 btrfs_end_transaction(trans, root);
3818 goto done;
3819 }
3820 WARN_ON(diff > old_total);
3821 btrfs_set_super_total_bytes(super_copy, old_total - diff);
3822 unlock_chunks(root);
3823 btrfs_end_transaction(trans, root);
3824 done:
3825 btrfs_free_path(path);
3826 return ret;
3827 }
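
/*
 * Note on the retry above: the dev extent walk runs from the end of
 * the device (key.offset starts at (u64)-1) toward the front and
 * relocates every chunk whose extent crosses new_size. A relocation
 * can fail with -ENOSPC even though earlier relocations in the same
 * pass freed space, so one full retry pass is made before the shrink
 * is abandoned and the old size restored.
 */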
3828
3829 static int btrfs_add_system_chunk(struct btrfs_root *root,
3830 struct btrfs_key *key,
3831 struct btrfs_chunk *chunk, int item_size)
3832 {
3833 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3834 struct btrfs_disk_key disk_key;
3835 u32 array_size;
3836 u8 *ptr;
3837
3838 array_size = btrfs_super_sys_array_size(super_copy);
3839 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3840 return -EFBIG;
3841
3842 ptr = super_copy->sys_chunk_array + array_size;
3843 btrfs_cpu_key_to_disk(&disk_key, key);
3844 memcpy(ptr, &disk_key, sizeof(disk_key));
3845 ptr += sizeof(disk_key);
3846 memcpy(ptr, chunk, item_size);
3847 item_size += sizeof(disk_key);
3848 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3849 return 0;
3850 }
3851
3852 /*
3853 * sort the devices in descending order by max_avail, total_avail
3854 */
3855 static int btrfs_cmp_device_info(const void *a, const void *b)
3856 {
3857 const struct btrfs_device_info *di_a = a;
3858 const struct btrfs_device_info *di_b = b;
3859
3860 if (di_a->max_avail > di_b->max_avail)
3861 return -1;
3862 if (di_a->max_avail < di_b->max_avail)
3863 return 1;
3864 if (di_a->total_avail > di_b->total_avail)
3865 return -1;
3866 if (di_a->total_avail < di_b->total_avail)
3867 return 1;
3868 return 0;
3869 }
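
/*
 * A sketch of how this comparator is used (the chunk allocator below
 * sorts its per-device info this way, assuming the kernel's sort()
 * from <linux/sort.h>):
 *
 *	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
 *	     btrfs_cmp_device_info, NULL);
 *
 * After sorting, devices_info[0] holds the device with the largest
 * free extent, which bounds the stripe size the allocator can pick.
 */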
3870
3871 static struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
3872 [BTRFS_RAID_RAID10] = {
3873 .sub_stripes = 2,
3874 .dev_stripes = 1,
3875 .devs_max = 0, /* 0 == as many as possible */
3876 .devs_min = 4,
3877 .devs_increment = 2,
3878 .ncopies = 2,
3879 },
3880 [BTRFS_RAID_RAID1] = {
3881 .sub_stripes = 1,
3882 .dev_stripes = 1,
3883 .devs_max = 2,
3884 .devs_min = 2,
3885 .devs_increment = 2,
3886 .ncopies = 2,
3887 },
3888 [BTRFS_RAID_DUP] = {
3889 .sub_stripes = 1,
3890 .dev_stripes = 2,
3891 .devs_max = 1,
3892 .devs_min = 1,
3893 .devs_increment = 1,
3894 .ncopies = 2,
3895 },
3896 [BTRFS_RAID_RAID0] = {
3897 .sub_stripes = 1,
3898 .dev_stripes = 1,
3899 .devs_max = 0,
3900 .devs_min = 2,
3901 .devs_increment = 1,
3902 .ncopies = 1,
3903 },
3904 [BTRFS_RAID_SINGLE] = {
3905 .sub_stripes = 1,
3906 .dev_stripes = 1,
3907 .devs_max = 1,
3908 .devs_min = 1,
3909 .devs_increment = 1,
3910 .ncopies = 1,
3911 },
3912 [BTRFS_RAID_RAID5] = {
3913 .sub_stripes = 1,
3914 .dev_stripes = 1,
3915 .devs_max = 0,
3916 .devs_min = 2,
3917 .devs_increment = 1,
3918 .ncopies = 2,
3919 },
3920 [BTRFS_RAID_RAID6] = {
3921 .sub_stripes = 1,
3922 .dev_stripes = 1,
3923 .devs_max = 0,
3924 .devs_min = 3,
3925 .devs_increment = 1,
3926 .ncopies = 3,
3927 },
3928 };
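
/*
 * Reading the table: e.g. BTRFS_RAID_RAID10 needs at least four
 * devices (devs_min = 4), devices are consumed in pairs
 * (devs_increment = 2), every block exists twice (ncopies = 2), and
 * devs_max = 0 places no upper bound on the device count.
 */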
3929
3930 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
3931 {
3932 /* TODO allow the user to set a preferred stripe size */
3933 return 64 * 1024;
3934 }
3935
3936 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
3937 {
3938 if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
3939 return;
3940
3941 btrfs_set_fs_incompat(info, RAID56);
3942 }
3943
3944 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3945 struct btrfs_root *extent_root, u64 start,
3946 u64 type)
3947 {
3948 struct btrfs_fs_info *info = extent_root->fs_info;
3949 struct btrfs_fs_devices *fs_devices = info->fs_devices;
3950 struct list_head *cur;
3951 struct map_lookup *map = NULL;
3952 struct extent_map_tree *em_tree;
3953 struct extent_map *em;
3954 struct btrfs_device_info *devices_info = NULL;
3955 u64 total_avail;
3956 int num_stripes; /* total number of stripes to allocate */
3957 int data_stripes; /* number of stripes that count for
3958 block group size */
3959 int sub_stripes; /* sub_stripes info for map */
3960 int dev_stripes; /* stripes per dev */
3961 int devs_max; /* max devs to use */
3962 int devs_min; /* min devs needed */
3963 int devs_increment; /* ndevs has to be a multiple of this */
3964 int ncopies; /* how many copies of the data we have */
3965 int ret;
3966 u64 max_stripe_size;
3967 u64 max_chunk_size;
3968 u64 stripe_size;
3969 u64 num_bytes;
3970 u64 raid_stripe_len = BTRFS_STRIPE_LEN;
3971 int ndevs;
3972 int i;
3973 int j;
3974 int index;
3975
3976 BUG_ON(!alloc_profile_is_valid(type, 0));
3977
3978 if (list_empty(&fs_devices->alloc_list))
3979 return -ENOSPC;
3980
3981 index = __get_raid_index(type);
3982
3983 sub_stripes = btrfs_raid_array[index].sub_stripes;
3984 dev_stripes = btrfs_raid_array[index].dev_stripes;
3985 devs_max = btrfs_raid_array[index].devs_max;
3986 devs_min = btrfs_raid_array[index].devs_min;
3987 devs_increment = btrfs_raid_array[index].devs_increment;
3988 ncopies = btrfs_raid_array[index].ncopies;
3989
3990 if (type & BTRFS_BLOCK_GROUP_DATA) {
3991 max_stripe_size = 1024 * 1024 * 1024;
3992 max_chunk_size = 10 * max_stripe_size;
3993 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3994 /* for larger filesystems, use larger metadata chunks */
3995 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3996 max_stripe_size = 1024 * 1024 * 1024;
3997 else
3998 max_stripe_size = 256 * 1024 * 1024;
3999 max_chunk_size = max_stripe_size;
4000 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4001 max_stripe_size = 32 * 1024 * 1024;
4002 max_chunk_size = 2 * max_stripe_size;
4003 } else {
4004 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
4005 type);
4006 BUG_ON(1);
4007 }
4008
4009 /* we don't want a chunk larger than 10% of writeable space */
4010 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4011 max_chunk_size);
4012
4013 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
4014 GFP_NOFS);
4015 if (!devices_info)
4016 return -ENOMEM;
4017
4018 cur = fs_devices->alloc_list.next;
4019
4020 /*
4021 * in the first pass through the devices list, we gather information
4022 * about the available holes on each device.
4023 */
4024 ndevs = 0;
4025 while (cur != &fs_devices->alloc_list) {
4026 struct btrfs_device *device;
4027 u64 max_avail;
4028 u64 dev_offset;
4029
4030 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
4031
4032 cur = cur->next;
4033
4034 if (!device->writeable) {
4035 WARN(1, KERN_ERR
4036 "btrfs: read-only device in alloc_list\n");
4037 continue;
4038 }
4039
4040 if (!device->in_fs_metadata ||
4041 device->is_tgtdev_for_dev_replace)
4042 continue;
4043
4044 if (device->total_bytes > device->bytes_used)
4045 total_avail = device->total_bytes - device->bytes_used;
4046 else
4047 total_avail = 0;
4048
4049 /* If there is no space on this device, skip it. */
4050 if (total_avail == 0)
4051 continue;
4052
4053 ret = find_free_dev_extent(trans, device,
4054 max_stripe_size * dev_stripes,
4055 &dev_offset, &max_avail);
4056 if (ret && ret != -ENOSPC)
4057 goto error;
4058
4059 if (ret == 0)
4060 max_avail = max_stripe_size * dev_stripes;
4061
4062 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
4063 continue;
4064
4065 if (ndevs == fs_devices->rw_devices) {
4066 WARN(1, "%s: found more than %llu devices\n",
4067 __func__, fs_devices->rw_devices);
4068 break;
4069 }
4070 devices_info[ndevs].dev_offset = dev_offset;
4071 devices_info[ndevs].max_avail = max_avail;
4072 devices_info[ndevs].total_avail = total_avail;
4073 devices_info[ndevs].dev = device;
4074 ++ndevs;
4075 }
4076
4077 /*
4078 * now sort the devices by hole size / available space
4079 */
4080 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4081 btrfs_cmp_device_info, NULL);
4082
4083 /* round down to number of usable stripes */
4084 ndevs -= ndevs % devs_increment;
4085
4086 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
4087 ret = -ENOSPC;
4088 goto error;
4089 }
4090
4091 if (devs_max && ndevs > devs_max)
4092 ndevs = devs_max;
4093 /*
4094 * the primary goal is to maximize the number of stripes, so use as many
4095 * devices as possible, even if the stripes are not maximum sized.
4096 */
4097 stripe_size = devices_info[ndevs-1].max_avail;
4098 num_stripes = ndevs * dev_stripes;
4099
4100 /*
4101 * this will have to be fixed for RAID1 and RAID10 over
4102 * more drives
4103 */
4104 data_stripes = num_stripes / ncopies;
4105
4106 if (type & BTRFS_BLOCK_GROUP_RAID5) {
4107 raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
4108 btrfs_super_stripesize(info->super_copy));
4109 data_stripes = num_stripes - 1;
4110 }
4111 if (type & BTRFS_BLOCK_GROUP_RAID6) {
4112 raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
4113 btrfs_super_stripesize(info->super_copy));
4114 data_stripes = num_stripes - 2;
4115 }
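
	/*
	 * Example with illustrative numbers: RAID6 on six devices gives
	 * num_stripes = 6 but only data_stripes = 4, so a chunk built from
	 * stripes of size S provides 4 * S of logical address space.
	 */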
4116
4117 /*
4118 * Use the number of data stripes to figure out how big this chunk
4119 * is really going to be in terms of logical address space,
4120 * and compare that answer with the max chunk size
4121 */
4122 if (stripe_size * data_stripes > max_chunk_size) {
4123 u64 mask = (1ULL << 24) - 1;
4124 stripe_size = max_chunk_size;
4125 do_div(stripe_size, data_stripes);
4126
4127 /* bump the answer up to a 16MB boundary */
4128 stripe_size = (stripe_size + mask) & ~mask;
4129
4130 /* but don't go higher than the limits we found
4131 * while searching for free extents
4132 */
4133 if (stripe_size > devices_info[ndevs-1].max_avail)
4134 stripe_size = devices_info[ndevs-1].max_avail;
4135 }
4136
4137 do_div(stripe_size, dev_stripes);
4138
4139 /* align to BTRFS_STRIPE_LEN */
4140 do_div(stripe_size, raid_stripe_len);
4141 stripe_size *= raid_stripe_len;
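
	/*
	 * Worked example (illustrative numbers): a DATA chunk striped as
	 * RAID0 over three devices that each have at least 1GB free ends
	 * up with stripe_size = 1GB (already aligned to the 64K stripe
	 * length) and num_stripes = data_stripes = 3, i.e. a 3GB chunk,
	 * assuming the 10% writeable-space cap on max_chunk_size does
	 * not bite.
	 */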
4142
4143 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4144 if (!map) {
4145 ret = -ENOMEM;
4146 goto error;
4147 }
4148 map->num_stripes = num_stripes;
4149
4150 for (i = 0; i < ndevs; ++i) {
4151 for (j = 0; j < dev_stripes; ++j) {
4152 int s = i * dev_stripes + j;
4153 map->stripes[s].dev = devices_info[i].dev;
4154 map->stripes[s].physical = devices_info[i].dev_offset +
4155 j * stripe_size;
4156 }
4157 }
4158 map->sector_size = extent_root->sectorsize;
4159 map->stripe_len = raid_stripe_len;
4160 map->io_align = raid_stripe_len;
4161 map->io_width = raid_stripe_len;
4162 map->type = type;
4163 map->sub_stripes = sub_stripes;
4164
4165 num_bytes = stripe_size * data_stripes;
4166
4167 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
4168
4169 em = alloc_extent_map();
4170 if (!em) {
4171 ret = -ENOMEM;
4172 goto error;
4173 }
4174 em->bdev = (struct block_device *)map;
4175 em->start = start;
4176 em->len = num_bytes;
4177 em->block_start = 0;
4178 em->block_len = em->len;
4179 em->orig_block_len = stripe_size;
4180
4181 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4182 write_lock(&em_tree->lock);
4183 ret = add_extent_mapping(em_tree, em, 0);
4184 if (!ret) {
4185 list_add_tail(&em->list, &trans->transaction->pending_chunks);
4186 atomic_inc(&em->refs);
4187 }
4188 write_unlock(&em_tree->lock);
4189 if (ret) {
4190 free_extent_map(em);
4191 goto error;
4192 }
4193
4194 ret = btrfs_make_block_group(trans, extent_root, 0, type,
4195 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4196 start, num_bytes);
4197 if (ret)
4198 goto error_del_extent;
4199
4200 free_extent_map(em);
4201 check_raid56_incompat_flag(extent_root->fs_info, type);
4202
4203 kfree(devices_info);
4204 return 0;
4205
4206 error_del_extent:
4207 write_lock(&em_tree->lock);
4208 remove_extent_mapping(em_tree, em);
4209 write_unlock(&em_tree->lock);
4210
4211 /* One for our allocation */
4212 free_extent_map(em);
4213 /* One for the tree reference */
4214 free_extent_map(em);
4215 error:
4216 kfree(map);
4217 kfree(devices_info);
4218 return ret;
4219 }
4220
4221 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4222 struct btrfs_root *extent_root,
4223 u64 chunk_offset, u64 chunk_size)
4224 {
4225 struct btrfs_key key;
4226 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4227 struct btrfs_device *device;
4228 struct btrfs_chunk *chunk;
4229 struct btrfs_stripe *stripe;
4230 struct extent_map_tree *em_tree;
4231 struct extent_map *em;
4232 struct map_lookup *map;
4233 size_t item_size;
4234 u64 dev_offset;
4235 u64 stripe_size;
4236 int i = 0;
4237 int ret;
4238
4239 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4240 read_lock(&em_tree->lock);
4241 em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
4242 read_unlock(&em_tree->lock);
4243
4244 if (!em) {
4245 btrfs_crit(extent_root->fs_info, "unable to find logical "
4246 "%Lu len %Lu", chunk_offset, chunk_size);
4247 return -EINVAL;
4248 }
4249
4250 if (em->start != chunk_offset || em->len != chunk_size) {
4251 btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted"
4252 " %Lu-%Lu, found %Lu-%Lu\n", chunk_offset,
4253 chunk_size, em->start, em->len);
4254 free_extent_map(em);
4255 return -EINVAL;
4256 }
4257
4258 map = (struct map_lookup *)em->bdev;
4259 item_size = btrfs_chunk_item_size(map->num_stripes);
4260 stripe_size = em->orig_block_len;
4261
4262 chunk = kzalloc(item_size, GFP_NOFS);
4263 if (!chunk) {
4264 ret = -ENOMEM;
4265 goto out;
4266 }
4267
4268 for (i = 0; i < map->num_stripes; i++) {
4269 device = map->stripes[i].dev;
4270 dev_offset = map->stripes[i].physical;
4271
4272 device->bytes_used += stripe_size;
4273 ret = btrfs_update_device(trans, device);
4274 if (ret)
4275 goto out;
4276 ret = btrfs_alloc_dev_extent(trans, device,
4277 chunk_root->root_key.objectid,
4278 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4279 chunk_offset, dev_offset,
4280 stripe_size);
4281 if (ret)
4282 goto out;
4283 }
4284
4285 spin_lock(&extent_root->fs_info->free_chunk_lock);
4286 extent_root->fs_info->free_chunk_space -= (stripe_size *
4287 map->num_stripes);
4288 spin_unlock(&extent_root->fs_info->free_chunk_lock);
4289
4290 stripe = &chunk->stripe;
4291 for (i = 0; i < map->num_stripes; i++) {
4292 device = map->stripes[i].dev;
4293 dev_offset = map->stripes[i].physical;
4294
4295 btrfs_set_stack_stripe_devid(stripe, device->devid);
4296 btrfs_set_stack_stripe_offset(stripe, dev_offset);
4297 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4298 stripe++;
4299 }
4300
4301 btrfs_set_stack_chunk_length(chunk, chunk_size);
4302 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4303 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4304 btrfs_set_stack_chunk_type(chunk, map->type);
4305 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4306 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4307 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4308 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4309 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4310
4311 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4312 key.type = BTRFS_CHUNK_ITEM_KEY;
4313 key.offset = chunk_offset;
4314
4315 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4316 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4317 /*
4318 * TODO: clean up the chunk item inserted into the
4319 * chunk root in case of failure.
4320 */
4321 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4322 item_size);
4323 }
4324
4325 out:
4326 kfree(chunk);
4327 free_extent_map(em);
4328 return ret;
4329 }
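
/*
 * A minimal sketch of the item sizing used above (mirrors
 * btrfs_chunk_item_size() from ctree.h): struct btrfs_chunk already
 * embeds the first stripe, so only num_stripes - 1 additional
 * struct btrfs_stripe entries extend the item.
 */
static inline size_t __maybe_unused demo_chunk_item_size(int num_stripes)
{
	return sizeof(struct btrfs_chunk) +
	       (num_stripes - 1) * sizeof(struct btrfs_stripe);
}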
4330
4331 /*
4332 * Chunk allocation falls into two parts. The first part does the work
4333 * that makes the newly allocated chunk usable without touching the
4334 * chunk tree. The second part does the work that requires modifying
4335 * the chunk tree. This division is important for the
4336 * bootstrap process of adding storage to a seed btrfs.
4337 */
4338 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4339 struct btrfs_root *extent_root, u64 type)
4340 {
4341 u64 chunk_offset;
4342
4343 chunk_offset = find_next_chunk(extent_root->fs_info);
4344 return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
4345 }
4346
4347 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4348 struct btrfs_root *root,
4349 struct btrfs_device *device)
4350 {
4351 u64 chunk_offset;
4352 u64 sys_chunk_offset;
4353 u64 alloc_profile;
4354 struct btrfs_fs_info *fs_info = root->fs_info;
4355 struct btrfs_root *extent_root = fs_info->extent_root;
4356 int ret;
4357
4358 chunk_offset = find_next_chunk(fs_info);
4359 alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4360 ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
4361 alloc_profile);
4362 if (ret)
4363 return ret;
4364
4365 sys_chunk_offset = find_next_chunk(fs_info);
4366 alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4367 ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
4368 alloc_profile);
4369 if (ret) {
4370 btrfs_abort_transaction(trans, root, ret);
4371 goto out;
4372 }
4373
4374 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
4375 if (ret)
4376 btrfs_abort_transaction(trans, root, ret);
4377 out:
4378 return ret;
4379 }
4380
4381 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4382 {
4383 struct extent_map *em;
4384 struct map_lookup *map;
4385 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4386 int readonly = 0;
4387 int i;
4388
4389 read_lock(&map_tree->map_tree.lock);
4390 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4391 read_unlock(&map_tree->map_tree.lock);
4392 if (!em)
4393 return 1;
4394
4395 if (btrfs_test_opt(root, DEGRADED)) {
4396 free_extent_map(em);
4397 return 0;
4398 }
4399
4400 map = (struct map_lookup *)em->bdev;
4401 for (i = 0; i < map->num_stripes; i++) {
4402 if (!map->stripes[i].dev->writeable) {
4403 readonly = 1;
4404 break;
4405 }
4406 }
4407 free_extent_map(em);
4408 return readonly;
4409 }
4410
4411 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4412 {
4413 extent_map_tree_init(&tree->map_tree);
4414 }
4415
4416 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
4417 {
4418 struct extent_map *em;
4419
4420 while (1) {
4421 write_lock(&tree->map_tree.lock);
4422 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
4423 if (em)
4424 remove_extent_mapping(&tree->map_tree, em);
4425 write_unlock(&tree->map_tree.lock);
4426 if (!em)
4427 break;
4428 kfree(em->bdev);
4429 /* once for us */
4430 free_extent_map(em);
4431 /* once for the tree */
4432 free_extent_map(em);
4433 }
4434 }
4435
4436 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
4437 {
4438 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4439 struct extent_map *em;
4440 struct map_lookup *map;
4441 struct extent_map_tree *em_tree = &map_tree->map_tree;
4442 int ret;
4443
4444 read_lock(&em_tree->lock);
4445 em = lookup_extent_mapping(em_tree, logical, len);
4446 read_unlock(&em_tree->lock);
4447
4448 /*
4449 * We could return errors for these cases, but that could get ugly and
4450 * we'd probably do the same thing anyway: do nothing else and exit.
4451 * So return 1 so the callers don't try to use other copies.
4452 */
4453 if (!em) {
4454 btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
4455 logical+len);
4456 return 1;
4457 }
4458
4459 if (em->start > logical || em->start + em->len < logical) {
4460 btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
4461 "%Lu-%Lu\n", logical, logical+len, em->start,
4462 em->start + em->len);
4463 return 1;
4464 }
4465
4466 map = (struct map_lookup *)em->bdev;
4467 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4468 ret = map->num_stripes;
4469 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4470 ret = map->sub_stripes;
4471 else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
4472 ret = 2;
4473 else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4474 ret = 3;
4475 else
4476 ret = 1;
4477 free_extent_map(em);
4478
4479 btrfs_dev_replace_lock(&fs_info->dev_replace);
4480 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4481 ret++;
4482 btrfs_dev_replace_unlock(&fs_info->dev_replace);
4483
4484 return ret;
4485 }
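
/*
 * Example: a RAID10 chunk reports sub_stripes (2) readable copies,
 * while RAID6 reports 3 (the data plus the P and Q reconstructions).
 * An ongoing dev replace adds one more, because ranges left of the
 * copy cursor can also be read from the target drive.
 */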
4486
4487 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
4488 struct btrfs_mapping_tree *map_tree,
4489 u64 logical)
4490 {
4491 struct extent_map *em;
4492 struct map_lookup *map;
4493 struct extent_map_tree *em_tree = &map_tree->map_tree;
4494 unsigned long len = root->sectorsize;
4495
4496 read_lock(&em_tree->lock);
4497 em = lookup_extent_mapping(em_tree, logical, len);
4498 read_unlock(&em_tree->lock);
4499 BUG_ON(!em);
4500
4501 BUG_ON(em->start > logical || em->start + em->len < logical);
4502 map = (struct map_lookup *)em->bdev;
4503 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4504 BTRFS_BLOCK_GROUP_RAID6)) {
4505 len = map->stripe_len * nr_data_stripes(map);
4506 }
4507 free_extent_map(em);
4508 return len;
4509 }
4510
4511 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
4512 u64 logical, u64 len, int mirror_num)
4513 {
4514 struct extent_map *em;
4515 struct map_lookup *map;
4516 struct extent_map_tree *em_tree = &map_tree->map_tree;
4517 int ret = 0;
4518
4519 read_lock(&em_tree->lock);
4520 em = lookup_extent_mapping(em_tree, logical, len);
4521 read_unlock(&em_tree->lock);
4522 BUG_ON(!em);
4523
4524 BUG_ON(em->start > logical || em->start + em->len < logical);
4525 map = (struct map_lookup *)em->bdev;
4526 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4527 BTRFS_BLOCK_GROUP_RAID6))
4528 ret = 1;
4529 free_extent_map(em);
4530 return ret;
4531 }
4532
4533 static int find_live_mirror(struct btrfs_fs_info *fs_info,
4534 struct map_lookup *map, int first, int num,
4535 int optimal, int dev_replace_is_ongoing)
4536 {
4537 int i;
4538 int tolerance;
4539 struct btrfs_device *srcdev;
4540
4541 if (dev_replace_is_ongoing &&
4542 fs_info->dev_replace.cont_reading_from_srcdev_mode ==
4543 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
4544 srcdev = fs_info->dev_replace.srcdev;
4545 else
4546 srcdev = NULL;
4547
4548 /*
4549 * try to avoid the drive that is the source drive for a
4550 * dev-replace procedure; only choose it if no other non-missing
4551 * mirror is available
4552 */
4553 for (tolerance = 0; tolerance < 2; tolerance++) {
4554 if (map->stripes[optimal].dev->bdev &&
4555 (tolerance || map->stripes[optimal].dev != srcdev))
4556 return optimal;
4557 for (i = first; i < first + num; i++) {
4558 if (map->stripes[i].dev->bdev &&
4559 (tolerance || map->stripes[i].dev != srcdev))
4560 return i;
4561 }
4562 }
4563
4564 /* we couldn't find one that doesn't fail. Just return something
4565 * and the io error handling code will clean up eventually
4566 */
4567 return optimal;
4568 }
4569
4570 static inline int parity_smaller(u64 a, u64 b)
4571 {
4572 return a > b;
4573 }
4574
4575 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
4576 static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
4577 {
4578 struct btrfs_bio_stripe s;
4579 int i;
4580 u64 l;
4581 int again = 1;
4582
4583 while (again) {
4584 again = 0;
4585 for (i = 0; i < bbio->num_stripes - 1; i++) {
4586 if (parity_smaller(raid_map[i], raid_map[i+1])) {
4587 s = bbio->stripes[i];
4588 l = raid_map[i];
4589 bbio->stripes[i] = bbio->stripes[i+1];
4590 raid_map[i] = raid_map[i+1];
4591 bbio->stripes[i+1] = s;
4592 raid_map[i+1] = l;
4593 again = 1;
4594 }
4595 }
4596 }
4597 }
4598
4599 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4600 u64 logical, u64 *length,
4601 struct btrfs_bio **bbio_ret,
4602 int mirror_num, u64 **raid_map_ret)
4603 {
4604 struct extent_map *em;
4605 struct map_lookup *map;
4606 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4607 struct extent_map_tree *em_tree = &map_tree->map_tree;
4608 u64 offset;
4609 u64 stripe_offset;
4610 u64 stripe_end_offset;
4611 u64 stripe_nr;
4612 u64 stripe_nr_orig;
4613 u64 stripe_nr_end;
4614 u64 stripe_len;
4615 u64 *raid_map = NULL;
4616 int stripe_index;
4617 int i;
4618 int ret = 0;
4619 int num_stripes;
4620 int max_errors = 0;
4621 struct btrfs_bio *bbio = NULL;
4622 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
4623 int dev_replace_is_ongoing = 0;
4624 int num_alloc_stripes;
4625 int patch_the_first_stripe_for_dev_replace = 0;
4626 u64 physical_to_patch_in_first_stripe = 0;
4627 u64 raid56_full_stripe_start = (u64)-1;
4628
4629 read_lock(&em_tree->lock);
4630 em = lookup_extent_mapping(em_tree, logical, *length);
4631 read_unlock(&em_tree->lock);
4632
4633 if (!em) {
4634 btrfs_crit(fs_info, "unable to find logical %llu len %llu",
4635 (unsigned long long)logical,
4636 (unsigned long long)*length);
4637 return -EINVAL;
4638 }
4639
4640 if (em->start > logical || em->start + em->len < logical) {
4641 btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
4642 "found %Lu-%Lu", logical, em->start,
4643 em->start + em->len);
free_extent_map(em);
4644 return -EINVAL;
4645 }
4646
4647 map = (struct map_lookup *)em->bdev;
4648 offset = logical - em->start;
4649
4650 stripe_len = map->stripe_len;
4651 stripe_nr = offset;
4652 /*
4653 * stripe_nr counts the total number of stripes we have to stride
4654 * to get to this block
4655 */
4656 do_div(stripe_nr, stripe_len);
4657
4658 stripe_offset = stripe_nr * stripe_len;
4659 BUG_ON(offset < stripe_offset);
4660
4661 /* stripe_offset is the offset of this block in its stripe */
4662 stripe_offset = offset - stripe_offset;
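
	/*
	 * Worked example (illustrative numbers): with stripe_len = 64K and
	 * offset = 200K, stripe_nr ends up as 3 and stripe_offset as
	 * 200K - 3 * 64K = 8K into the fourth stripe.
	 */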
4663
4664 /* if we're here for raid56, we need to know the stripe aligned start */
4665 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4666 unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
4667 raid56_full_stripe_start = offset;
4668
4669 /* allow a write of a full stripe, but make sure we don't
4670 * allow straddling of stripes
4671 */
4672 do_div(raid56_full_stripe_start, full_stripe_len);
4673 raid56_full_stripe_start *= full_stripe_len;
4674 }
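
	/*
	 * Illustrative example: RAID5 on four devices has three data
	 * stripes, so with stripe_len = 64K the full stripe is 192K wide
	 * and an offset of 200K rounds down to
	 * raid56_full_stripe_start = 192K.
	 */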
4675
4676 if (rw & REQ_DISCARD) {
4677 /* we don't discard raid56 yet */
4678 if (map->type &
4679 (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4680 ret = -EOPNOTSUPP;
4681 goto out;
4682 }
4683 *length = min_t(u64, em->len - offset, *length);
4684 } else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
4685 u64 max_len;
4686 /* For writes to RAID[56], allow a full stripeset across all disks.
4687 For other RAID types and for RAID[56] reads, just allow a single
4688 stripe (on a single disk). */
4689 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
4690 (rw & REQ_WRITE)) {
4691 max_len = stripe_len * nr_data_stripes(map) -
4692 (offset - raid56_full_stripe_start);
4693 } else {
4694 /* we limit the length of each bio to what fits in a stripe */
4695 max_len = stripe_len - stripe_offset;
4696 }
4697 *length = min_t(u64, em->len - offset, max_len);
4698 } else {
4699 *length = em->len - offset;
4700 }
4701
4702 /* This is for when we're called from btrfs_merge_bio_hook() and all
4703 it cares about is the length */
4704 if (!bbio_ret)
4705 goto out;
4706
4707 btrfs_dev_replace_lock(dev_replace);
4708 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
4709 if (!dev_replace_is_ongoing)
4710 btrfs_dev_replace_unlock(dev_replace);
4711
4712 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
4713 !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
4714 dev_replace->tgtdev != NULL) {
4715 /*
4716 * in dev-replace case, for repair case (that's the only
4717 * case where the mirror is selected explicitly when
4718 * calling btrfs_map_block), blocks left of the left cursor
4719 * can also be read from the target drive.
4720 * For REQ_GET_READ_MIRRORS, the target drive is added as
4721 * the last one to the array of stripes. For READ, it also
4722 * needs to be supported using the same mirror number.
4723 * If the requested block is not left of the left cursor,
4724 * EIO is returned. This can happen because btrfs_num_copies()
4725 * returns one more in the dev-replace case.
4726 */
4727 u64 tmp_length = *length;
4728 struct btrfs_bio *tmp_bbio = NULL;
4729 int tmp_num_stripes;
4730 u64 srcdev_devid = dev_replace->srcdev->devid;
4731 int index_srcdev = 0;
4732 int found = 0;
4733 u64 physical_of_found = 0;
4734
4735 ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
4736 logical, &tmp_length, &tmp_bbio, 0, NULL);
4737 if (ret) {
4738 WARN_ON(tmp_bbio != NULL);
4739 goto out;
4740 }
4741
4742 tmp_num_stripes = tmp_bbio->num_stripes;
4743 if (mirror_num > tmp_num_stripes) {
4744 /*
4745 * REQ_GET_READ_MIRRORS does not contain this
4746 * mirror, which means that the requested area
4747 * is not left of the left cursor
4748 */
4749 ret = -EIO;
4750 kfree(tmp_bbio);
4751 goto out;
4752 }
4753
4754 /*
4755 * process the rest of the function using the mirror_num
4756 * of the source drive. Therefore look it up first.
4757 * At the end, patch the device pointer to the one of the
4758 * target drive.
4759 */
4760 for (i = 0; i < tmp_num_stripes; i++) {
4761 if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
4762 /*
4763 * In case of DUP, in order to keep it
4764 * simple, only add the mirror with the
4765 * lowest physical address
4766 */
4767 if (found &&
4768 physical_of_found <=
4769 tmp_bbio->stripes[i].physical)
4770 continue;
4771 index_srcdev = i;
4772 found = 1;
4773 physical_of_found =
4774 tmp_bbio->stripes[i].physical;
4775 }
4776 }
4777
4778 if (found) {
4779 mirror_num = index_srcdev + 1;
4780 patch_the_first_stripe_for_dev_replace = 1;
4781 physical_to_patch_in_first_stripe = physical_of_found;
4782 } else {
4783 WARN_ON(1);
4784 ret = -EIO;
4785 kfree(tmp_bbio);
4786 goto out;
4787 }
4788
4789 kfree(tmp_bbio);
4790 } else if (mirror_num > map->num_stripes) {
4791 mirror_num = 0;
4792 }
4793
4794 num_stripes = 1;
4795 stripe_index = 0;
4796 stripe_nr_orig = stripe_nr;
4797 stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
4798 do_div(stripe_nr_end, map->stripe_len);
4799 stripe_end_offset = stripe_nr_end * map->stripe_len -
4800 (offset + *length);
4801
4802 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4803 if (rw & REQ_DISCARD)
4804 num_stripes = min_t(u64, map->num_stripes,
4805 stripe_nr_end - stripe_nr_orig);
4806 stripe_index = do_div(stripe_nr, map->num_stripes);
4807 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
4808 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
4809 num_stripes = map->num_stripes;
4810 else if (mirror_num)
4811 stripe_index = mirror_num - 1;
4812 else {
4813 stripe_index = find_live_mirror(fs_info, map, 0,
4814 map->num_stripes,
4815 current->pid % map->num_stripes,
4816 dev_replace_is_ongoing);
4817 mirror_num = stripe_index + 1;
4818 }
4819
4820 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
4821 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
4822 num_stripes = map->num_stripes;
4823 } else if (mirror_num) {
4824 stripe_index = mirror_num - 1;
4825 } else {
4826 mirror_num = 1;
4827 }
4828
4829 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4830 int factor = map->num_stripes / map->sub_stripes;
4831
4832 stripe_index = do_div(stripe_nr, factor);
4833 stripe_index *= map->sub_stripes;
4834
4835 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
4836 num_stripes = map->sub_stripes;
4837 else if (rw & REQ_DISCARD)
4838 num_stripes = min_t(u64, map->sub_stripes *
4839 (stripe_nr_end - stripe_nr_orig),
4840 map->num_stripes);
4841 else if (mirror_num)
4842 stripe_index += mirror_num - 1;
4843 else {
4844 int old_stripe_index = stripe_index;
4845 stripe_index = find_live_mirror(fs_info, map,
4846 stripe_index,
4847 map->sub_stripes, stripe_index +
4848 current->pid % map->sub_stripes,
4849 dev_replace_is_ongoing);
4850 mirror_num = stripe_index - old_stripe_index + 1;
4851 }
4852
4853 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4854 BTRFS_BLOCK_GROUP_RAID6)) {
4855 u64 tmp;
4856
4857 if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1)
4858 && raid_map_ret) {
4859 int i, rot;
4860
4861 /* push stripe_nr back to the start of the full stripe */
4862 stripe_nr = raid56_full_stripe_start;
4863 do_div(stripe_nr, stripe_len);
4864
4865 stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4866
4867 /* RAID[56] write or recovery. Return all stripes */
4868 num_stripes = map->num_stripes;
4869 max_errors = nr_parity_stripes(map);
4870
4871 raid_map = kmalloc(sizeof(u64) * num_stripes,
4872 GFP_NOFS);
4873 if (!raid_map) {
4874 ret = -ENOMEM;
4875 goto out;
4876 }
4877
4878 /* Work out the disk rotation on this stripe-set */
4879 tmp = stripe_nr;
4880 rot = do_div(tmp, num_stripes);
4881
4882 /* Fill in the logical address of each stripe */
4883 tmp = stripe_nr * nr_data_stripes(map);
4884 for (i = 0; i < nr_data_stripes(map); i++)
4885 raid_map[(i+rot) % num_stripes] =
4886 em->start + (tmp + i) * map->stripe_len;
4887
4888 raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
4889 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4890 raid_map[(i+rot+1) % num_stripes] =
4891 RAID6_Q_STRIPE;
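
			/*
			 * Illustrative example: RAID6 on five devices
			 * (three data stripes plus P and Q) with
			 * rot = 1 places the data addresses in
			 * raid_map[1..3], P in slot 4, and Q wraps to
			 * slot 0. RAID5_P_STRIPE and RAID6_Q_STRIPE
			 * are huge sentinel values, so
			 * sort_parity_stripes() later moves P and Q
			 * behind the data stripes.
			 */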
4892
4893 *length = map->stripe_len;
4894 stripe_index = 0;
4895 stripe_offset = 0;
4896 } else {
4897 /*
4898 * Mirror #0 or #1 means the original data block.
4899 * Mirror #2 is RAID5 parity block.
4900 * Mirror #3 is RAID6 Q block.
4901 */
4902 stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4903 if (mirror_num > 1)
4904 stripe_index = nr_data_stripes(map) +
4905 mirror_num - 2;
4906
4907 /* We distribute the parity blocks across stripes */
4908 tmp = stripe_nr + stripe_index;
4909 stripe_index = do_div(tmp, map->num_stripes);
4910 }
4911 } else {
4912 /*
4913 * after this do_div call, stripe_nr is the number of stripes
4914 * on this device we have to walk to find the data, and
4915 * stripe_index is the number of our device in the stripe array
4916 */
4917 stripe_index = do_div(stripe_nr, map->num_stripes);
4918 mirror_num = stripe_index + 1;
4919 }
4920 BUG_ON(stripe_index >= map->num_stripes);
4921
4922 num_alloc_stripes = num_stripes;
4923 if (dev_replace_is_ongoing) {
4924 if (rw & (REQ_WRITE | REQ_DISCARD))
4925 num_alloc_stripes <<= 1;
4926 if (rw & REQ_GET_READ_MIRRORS)
4927 num_alloc_stripes++;
4928 }
4929 bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
4930 if (!bbio) {
4931 kfree(raid_map);
4932 ret = -ENOMEM;
4933 goto out;
4934 }
4935 atomic_set(&bbio->error, 0);
4936
4937 if (rw & REQ_DISCARD) {
4938 int factor = 0;
4939 int sub_stripes = 0;
4940 u64 stripes_per_dev = 0;
4941 u32 remaining_stripes = 0;
4942 u32 last_stripe = 0;
4943
4944 if (map->type &
4945 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
4946 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4947 sub_stripes = 1;
4948 else
4949 sub_stripes = map->sub_stripes;
4950
4951 factor = map->num_stripes / sub_stripes;
4952 stripes_per_dev = div_u64_rem(stripe_nr_end -
4953 stripe_nr_orig,
4954 factor,
4955 &remaining_stripes);
4956 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
4957 last_stripe *= sub_stripes;
4958 }
4959
4960 for (i = 0; i < num_stripes; i++) {
4961 bbio->stripes[i].physical =
4962 map->stripes[stripe_index].physical +
4963 stripe_offset + stripe_nr * map->stripe_len;
4964 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
4965
4966 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
4967 BTRFS_BLOCK_GROUP_RAID10)) {
4968 bbio->stripes[i].length = stripes_per_dev *
4969 map->stripe_len;
4970
4971 if (i / sub_stripes < remaining_stripes)
4972 bbio->stripes[i].length +=
4973 map->stripe_len;
4974
4975 /*
4976 * Special for the first stripe and
4977 * the last stripe:
4978 *
4979 * |-------|...|-------|
4980 * |----------|
4981 * off end_off
4982 */
4983 if (i < sub_stripes)
4984 bbio->stripes[i].length -=
4985 stripe_offset;
4986
4987 if (stripe_index >= last_stripe &&
4988 stripe_index <= (last_stripe +
4989 sub_stripes - 1))
4990 bbio->stripes[i].length -=
4991 stripe_end_offset;
4992
4993 if (i == sub_stripes - 1)
4994 stripe_offset = 0;
4995 } else
4996 bbio->stripes[i].length = *length;
4997
4998 stripe_index++;
4999 if (stripe_index == map->num_stripes) {
5000 /* This could only happen for RAID0/10 */
5001 stripe_index = 0;
5002 stripe_nr++;
5003 }
5004 }
5005 } else {
5006 for (i = 0; i < num_stripes; i++) {
5007 bbio->stripes[i].physical =
5008 map->stripes[stripe_index].physical +
5009 stripe_offset +
5010 stripe_nr * map->stripe_len;
5011 bbio->stripes[i].dev =
5012 map->stripes[stripe_index].dev;
5013 stripe_index++;
5014 }
5015 }
5016
5017 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
5018 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
5019 BTRFS_BLOCK_GROUP_RAID10 |
5020 BTRFS_BLOCK_GROUP_RAID5 |
5021 BTRFS_BLOCK_GROUP_DUP)) {
5022 max_errors = 1;
5023 } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
5024 max_errors = 2;
5025 }
5026 }
5027
5028 if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
5029 dev_replace->tgtdev != NULL) {
5030 int index_where_to_add;
5031 u64 srcdev_devid = dev_replace->srcdev->devid;
5032
5033 /*
5034 * duplicate the write operations while the dev replace
5035 * procedure is running. Since the copying of the old disk
5036 * to the new disk takes place at run time while the
5037 * filesystem is mounted writable, the regular write
5038 * operations to the old disk have to be duplicated to go
5039 * to the new disk as well.
5040 * Note that device->missing is handled by the caller, and
5041 * that the write to the old disk is already set up in the
5042 * stripes array.
5043 */
5044 index_where_to_add = num_stripes;
5045 for (i = 0; i < num_stripes; i++) {
5046 if (bbio->stripes[i].dev->devid == srcdev_devid) {
5047 /* write to new disk, too */
5048 struct btrfs_bio_stripe *new =
5049 bbio->stripes + index_where_to_add;
5050 struct btrfs_bio_stripe *old =
5051 bbio->stripes + i;
5052
5053 new->physical = old->physical;
5054 new->length = old->length;
5055 new->dev = dev_replace->tgtdev;
5056 index_where_to_add++;
5057 max_errors++;
5058 }
5059 }
5060 num_stripes = index_where_to_add;
5061 } else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
5062 dev_replace->tgtdev != NULL) {
5063 u64 srcdev_devid = dev_replace->srcdev->devid;
5064 int index_srcdev = 0;
5065 int found = 0;
5066 u64 physical_of_found = 0;
5067
5068 /*
5069 * During the dev-replace procedure, the target drive can
5070 * also be used to read data in case it is needed to repair
5071 * a corrupt block elsewhere. This is possible if the
5072 * requested area is left of the left cursor. In this area,
5073 * the target drive is a full copy of the source drive.
5074 */
5075 for (i = 0; i < num_stripes; i++) {
5076 if (bbio->stripes[i].dev->devid == srcdev_devid) {
5077 /*
5078 * In case of DUP, in order to keep it
5079 * simple, only add the mirror with the
5080 * lowest physical address
5081 */
5082 if (found &&
5083 physical_of_found <=
5084 bbio->stripes[i].physical)
5085 continue;
5086 index_srcdev = i;
5087 found = 1;
5088 physical_of_found = bbio->stripes[i].physical;
5089 }
5090 }
5091 if (found) {
5092 u64 length = map->stripe_len;
5093
5094 if (physical_of_found + length <=
5095 dev_replace->cursor_left) {
5096 struct btrfs_bio_stripe *tgtdev_stripe =
5097 bbio->stripes + num_stripes;
5098
5099 tgtdev_stripe->physical = physical_of_found;
5100 tgtdev_stripe->length =
5101 bbio->stripes[index_srcdev].length;
5102 tgtdev_stripe->dev = dev_replace->tgtdev;
5103
5104 num_stripes++;
5105 }
5106 }
5107 }
5108
5109 *bbio_ret = bbio;
5110 bbio->num_stripes = num_stripes;
5111 bbio->max_errors = max_errors;
5112 bbio->mirror_num = mirror_num;
5113
5114 /*
5115 * this is the case that REQ_READ && dev_replace_is_ongoing &&
5116 * mirror_num == num_stripes + 1 && dev_replace target drive is
5117 * available as a mirror
5118 */
5119 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
5120 WARN_ON(num_stripes > 1);
5121 bbio->stripes[0].dev = dev_replace->tgtdev;
5122 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
5123 bbio->mirror_num = map->num_stripes + 1;
5124 }
5125 if (raid_map) {
5126 sort_parity_stripes(bbio, raid_map);
5127 *raid_map_ret = raid_map;
5128 }
5129 out:
5130 if (dev_replace_is_ongoing)
5131 btrfs_dev_replace_unlock(dev_replace);
5132 free_extent_map(em);
5133 return ret;
5134 }
5135
5136 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5137 u64 logical, u64 *length,
5138 struct btrfs_bio **bbio_ret, int mirror_num)
5139 {
5140 return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5141 mirror_num, NULL);
5142 }
5143
5144 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
5145 u64 chunk_start, u64 physical, u64 devid,
5146 u64 **logical, int *naddrs, int *stripe_len)
5147 {
5148 struct extent_map_tree *em_tree = &map_tree->map_tree;
5149 struct extent_map *em;
5150 struct map_lookup *map;
5151 u64 *buf;
5152 u64 bytenr;
5153 u64 length;
5154 u64 stripe_nr;
5155 u64 rmap_len;
5156 int i, j, nr = 0;
5157
5158 read_lock(&em_tree->lock);
5159 em = lookup_extent_mapping(em_tree, chunk_start, 1);
5160 read_unlock(&em_tree->lock);
5161
5162 if (!em) {
5163 printk(KERN_ERR "btrfs: couldn't find em for chunk %Lu\n",
5164 chunk_start);
5165 return -EIO;
5166 }
5167
5168 if (em->start != chunk_start) {
5169 printk(KERN_ERR "btrfs: bad chunk start, em=%Lu, wanted=%Lu\n",
5170 em->start, chunk_start);
5171 free_extent_map(em);
5172 return -EIO;
5173 }
5174 map = (struct map_lookup *)em->bdev;
5175
5176 length = em->len;
5177 rmap_len = map->stripe_len;
5178
5179 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5180 do_div(length, map->num_stripes / map->sub_stripes);
5181 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5182 do_div(length, map->num_stripes);
5183 else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
5184 BTRFS_BLOCK_GROUP_RAID6)) {
5185 do_div(length, nr_data_stripes(map));
5186 rmap_len = map->stripe_len * nr_data_stripes(map);
5187 }
5188
5189 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
5190 BUG_ON(!buf); /* -ENOMEM */
5191
5192 for (i = 0; i < map->num_stripes; i++) {
5193 if (devid && map->stripes[i].dev->devid != devid)
5194 continue;
5195 if (map->stripes[i].physical > physical ||
5196 map->stripes[i].physical + length <= physical)
5197 continue;
5198
5199 stripe_nr = physical - map->stripes[i].physical;
5200 do_div(stripe_nr, map->stripe_len);
5201
5202 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5203 stripe_nr = stripe_nr * map->num_stripes + i;
5204 do_div(stripe_nr, map->sub_stripes);
5205 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5206 stripe_nr = stripe_nr * map->num_stripes + i;
5207 } /* else if RAID[56], multiply by nr_data_stripes().
5208 * Alternatively, just use rmap_len below instead of
5209 * map->stripe_len */
5210
5211 bytenr = chunk_start + stripe_nr * rmap_len;
5212 WARN_ON(nr >= map->num_stripes);
5213 for (j = 0; j < nr; j++) {
5214 if (buf[j] == bytenr)
5215 break;
5216 }
5217 if (j == nr) {
5218 WARN_ON(nr >= map->num_stripes);
5219 buf[nr++] = bytenr;
5220 }
5221 }
5222
5223 *logical = buf;
5224 *naddrs = nr;
5225 *stripe_len = rmap_len;
5226
5227 free_extent_map(em);
5228 return 0;
5229 }
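
/*
 * Illustrative example for btrfs_rmap_block() above: in a RAID0 chunk
 * at chunk_start = 1M over two devices with stripe_len = 64K, the
 * physical block at map->stripes[0].physical + 192K gives
 * stripe_nr = 3, then stripe_nr = 3 * 2 + 0 = 6, i.e. the logical
 * address 1M + 6 * 64K.
 */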
5230
5231 static void btrfs_end_bio(struct bio *bio, int err)
5232 {
5233 struct btrfs_bio *bbio = bio->bi_private;
5234 int is_orig_bio = 0;
5235
5236 if (err) {
5237 atomic_inc(&bbio->error);
5238 if (err == -EIO || err == -EREMOTEIO) {
5239 unsigned int stripe_index =
5240 btrfs_io_bio(bio)->stripe_index;
5241 struct btrfs_device *dev;
5242
5243 BUG_ON(stripe_index >= bbio->num_stripes);
5244 dev = bbio->stripes[stripe_index].dev;
5245 if (dev->bdev) {
5246 if (bio->bi_rw & WRITE)
5247 btrfs_dev_stat_inc(dev,
5248 BTRFS_DEV_STAT_WRITE_ERRS);
5249 else
5250 btrfs_dev_stat_inc(dev,
5251 BTRFS_DEV_STAT_READ_ERRS);
5252 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
5253 btrfs_dev_stat_inc(dev,
5254 BTRFS_DEV_STAT_FLUSH_ERRS);
5255 btrfs_dev_stat_print_on_error(dev);
5256 }
5257 }
5258 }
5259
5260 if (bio == bbio->orig_bio)
5261 is_orig_bio = 1;
5262
5263 if (atomic_dec_and_test(&bbio->stripes_pending)) {
5264 if (!is_orig_bio) {
5265 bio_put(bio);
5266 bio = bbio->orig_bio;
5267 }
5268 bio->bi_private = bbio->private;
5269 bio->bi_end_io = bbio->end_io;
5270 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5271 /* only send an error to the higher layers if it is
5272 * beyond the tolerance of the btrfs bio
5273 */
5274 if (atomic_read(&bbio->error) > bbio->max_errors) {
5275 err = -EIO;
5276 } else {
5277 /*
5278 * this bio is actually up to date; we didn't
5279 * go over the max number of errors
5280 */
5281 set_bit(BIO_UPTODATE, &bio->bi_flags);
5282 err = 0;
5283 }
5284 kfree(bbio);
5285
5286 bio_endio(bio, err);
5287 } else if (!is_orig_bio) {
5288 bio_put(bio);
5289 }
5290 }
5291
5292 struct async_sched {
5293 struct bio *bio;
5294 int rw;
5295 struct btrfs_fs_info *info;
5296 struct btrfs_work work;
5297 };
5298
5299 /*
5300 * see run_scheduled_bios for a description of why bios are collected for
5301 * async submit.
5302 *
5303 * This will add one bio to the pending list for a device and make sure
5304 * the work struct is scheduled.
5305 */
5306 static noinline void btrfs_schedule_bio(struct btrfs_root *root,
5307 struct btrfs_device *device,
5308 int rw, struct bio *bio)
5309 {
5310 int should_queue = 1;
5311 struct btrfs_pending_bios *pending_bios;
5312
5313 if (device->missing || !device->bdev) {
5314 bio_endio(bio, -EIO);
5315 return;
5316 }
5317
5318 /* don't bother with additional async steps for reads, right now */
5319 if (!(rw & REQ_WRITE)) {
5320 bio_get(bio);
5321 btrfsic_submit_bio(rw, bio);
5322 bio_put(bio);
5323 return;
5324 }
5325
5326 /*
5327 * nr_async_bios allows us to reliably return congestion to the
5328 * higher layers. Otherwise, the async bio makes it appear we have
5329 * made progress against dirty pages when we've really just put it
5330 * on a queue for later
5331 */
5332 atomic_inc(&root->fs_info->nr_async_bios);
5333 WARN_ON(bio->bi_next);
5334 bio->bi_next = NULL;
5335 bio->bi_rw |= rw;
5336
5337 spin_lock(&device->io_lock);
5338 if (bio->bi_rw & REQ_SYNC)
5339 pending_bios = &device->pending_sync_bios;
5340 else
5341 pending_bios = &device->pending_bios;
5342
5343 if (pending_bios->tail)
5344 pending_bios->tail->bi_next = bio;
5345
5346 pending_bios->tail = bio;
5347 if (!pending_bios->head)
5348 pending_bios->head = bio;
5349 if (device->running_pending)
5350 should_queue = 0;
5351
5352 spin_unlock(&device->io_lock);
5353
5354 if (should_queue)
5355 btrfs_queue_worker(&root->fs_info->submit_workers,
5356 &device->work);
5357 }
5358
5359 static int bio_size_ok(struct block_device *bdev, struct bio *bio,
5360 sector_t sector)
5361 {
5362 struct bio_vec *prev;
5363 struct request_queue *q = bdev_get_queue(bdev);
5364 unsigned short max_sectors = queue_max_sectors(q);
5365 struct bvec_merge_data bvm = {
5366 .bi_bdev = bdev,
5367 .bi_sector = sector,
5368 .bi_rw = bio->bi_rw,
5369 };
5370
5371 if (bio->bi_vcnt == 0) {
5372 WARN_ON(1);
5373 return 1;
5374 }
5375
5376 prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
5377 if (bio_sectors(bio) > max_sectors)
5378 return 0;
5379
5380 if (!q->merge_bvec_fn)
5381 return 1;
5382
5383 bvm.bi_size = bio->bi_size - prev->bv_len;
5384 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
5385 return 0;
5386 return 1;
5387 }
5388
5389 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5390 struct bio *bio, u64 physical, int dev_nr,
5391 int rw, int async)
5392 {
5393 struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
5394
5395 bio->bi_private = bbio;
5396 btrfs_io_bio(bio)->stripe_index = dev_nr;
5397 bio->bi_end_io = btrfs_end_bio;
5398 bio->bi_sector = physical >> 9;
5399 #ifdef DEBUG
5400 {
5401 struct rcu_string *name;
5402
5403 rcu_read_lock();
5404 name = rcu_dereference(dev->name);
5405 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
5406 "(%s id %llu), size=%u\n", rw,
5407 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
5408 name->str, dev->devid, bio->bi_size);
5409 rcu_read_unlock();
5410 }
5411 #endif
5412 bio->bi_bdev = dev->bdev;
5413 if (async)
5414 btrfs_schedule_bio(root, dev, rw, bio);
5415 else
5416 btrfsic_submit_bio(rw, bio);
5417 }
5418
5419 static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5420 struct bio *first_bio, struct btrfs_device *dev,
5421 int dev_nr, int rw, int async)
5422 {
5423 struct bio_vec *bvec = first_bio->bi_io_vec;
5424 struct bio *bio;
5425 int nr_vecs = bio_get_nr_vecs(dev->bdev);
5426 u64 physical = bbio->stripes[dev_nr].physical;
5427
5428 again:
5429 bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
5430 if (!bio)
5431 return -ENOMEM;
5432
5433 while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
5434 if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
5435 bvec->bv_offset) < bvec->bv_len) {
5436 u64 len = bio->bi_size;
5437
5438 atomic_inc(&bbio->stripes_pending);
5439 submit_stripe_bio(root, bbio, bio, physical, dev_nr,
5440 rw, async);
5441 physical += len;
5442 goto again;
5443 }
5444 bvec++;
5445 }
5446
5447 submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
5448 return 0;
5449 }
5450
5451 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
5452 {
5453 atomic_inc(&bbio->error);
5454 if (atomic_dec_and_test(&bbio->stripes_pending)) {
5455 bio->bi_private = bbio->private;
5456 bio->bi_end_io = bbio->end_io;
5457 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5458 bio->bi_sector = logical >> 9;
5459 kfree(bbio);
5460 bio_endio(bio, -EIO);
5461 }
5462 }
5463
5464 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
5465 int mirror_num, int async_submit)
5466 {
5467 struct btrfs_device *dev;
5468 struct bio *first_bio = bio;
5469 u64 logical = (u64)bio->bi_sector << 9;
5470 u64 length = 0;
5471 u64 map_length;
5472 u64 *raid_map = NULL;
5473 int ret;
5474 int dev_nr = 0;
5475 int total_devs = 1;
5476 struct btrfs_bio *bbio = NULL;
5477
5478 length = bio->bi_size;
5479 map_length = length;
5480
5481 ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
5482 mirror_num, &raid_map);
5483 if (ret) /* -ENOMEM */
5484 return ret;
5485
5486 total_devs = bbio->num_stripes;
5487 bbio->orig_bio = first_bio;
5488 bbio->private = first_bio->bi_private;
5489 bbio->end_io = first_bio->bi_end_io;
5490 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
5491
5492 if (raid_map) {
5493 /* In this case, map_length has been set to the length of
5494 a single stripe, not the whole write */
5495 if (rw & WRITE) {
5496 return raid56_parity_write(root, bio, bbio,
5497 raid_map, map_length);
5498 } else {
5499 return raid56_parity_recover(root, bio, bbio,
5500 raid_map, map_length,
5501 mirror_num);
5502 }
5503 }
5504
5505 if (map_length < length) {
5506 btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
5507 (unsigned long long)logical,
5508 (unsigned long long)length,
5509 (unsigned long long)map_length);
5510 BUG();
5511 }
5512
5513 while (dev_nr < total_devs) {
5514 dev = bbio->stripes[dev_nr].dev;
5515 if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
5516 bbio_error(bbio, first_bio, logical);
5517 dev_nr++;
5518 continue;
5519 }
5520
5521 /*
5522 * Check and see if we're ok with this bio based on its size
5523 * and offset with the given device.
5524 */
5525 if (!bio_size_ok(dev->bdev, first_bio,
5526 bbio->stripes[dev_nr].physical >> 9)) {
5527 ret = breakup_stripe_bio(root, bbio, first_bio, dev,
5528 dev_nr, rw, async_submit);
5529 BUG_ON(ret);
5530 dev_nr++;
5531 continue;
5532 }
5533
5534 if (dev_nr < total_devs - 1) {
5535 bio = btrfs_bio_clone(first_bio, GFP_NOFS);
5536 BUG_ON(!bio); /* -ENOMEM */
5537 } else {
5538 bio = first_bio;
5539 }
5540
5541 submit_stripe_bio(root, bbio, bio,
5542 bbio->stripes[dev_nr].physical, dev_nr, rw,
5543 async_submit);
5544 dev_nr++;
5545 }
5546 return 0;
5547 }
5548
5549 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
5550 u8 *uuid, u8 *fsid)
5551 {
5552 struct btrfs_device *device;
5553 struct btrfs_fs_devices *cur_devices;
5554
5555 cur_devices = fs_info->fs_devices;
5556 while (cur_devices) {
5557 if (!fsid ||
5558 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5559 device = __find_device(&cur_devices->devices,
5560 devid, uuid);
5561 if (device)
5562 return device;
5563 }
5564 cur_devices = cur_devices->seed;
5565 }
5566 return NULL;
5567 }
5568
5569 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
5570 u64 devid, u8 *dev_uuid)
5571 {
5572 struct btrfs_device *device;
5573 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
5574
5575 device = kzalloc(sizeof(*device), GFP_NOFS);
5576 if (!device)
5577 return NULL;
5578 list_add(&device->dev_list,
5579 &fs_devices->devices);
5580 device->devid = devid;
5581 device->work.func = pending_bios_fn;
5582 device->fs_devices = fs_devices;
5583 device->missing = 1;
5584 fs_devices->num_devices++;
5585 fs_devices->missing_devices++;
5586 spin_lock_init(&device->io_lock);
5587 INIT_LIST_HEAD(&device->dev_alloc_list);
5588 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
5589 return device;
5590 }
5591
5592 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
5593 struct extent_buffer *leaf,
5594 struct btrfs_chunk *chunk)
5595 {
5596 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
5597 struct map_lookup *map;
5598 struct extent_map *em;
5599 u64 logical;
5600 u64 length;
5601 u64 devid;
5602 u8 uuid[BTRFS_UUID_SIZE];
5603 int num_stripes;
5604 int ret;
5605 int i;
5606
5607 logical = key->offset;
5608 length = btrfs_chunk_length(leaf, chunk);
5609
5610 read_lock(&map_tree->map_tree.lock);
5611 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
5612 read_unlock(&map_tree->map_tree.lock);
5613
5614 /* already mapped? */
5615 if (em && em->start <= logical && em->start + em->len > logical) {
5616 free_extent_map(em);
5617 return 0;
5618 } else if (em) {
5619 free_extent_map(em);
5620 }
5621
5622 em = alloc_extent_map();
5623 if (!em)
5624 return -ENOMEM;
5625 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
5626 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
5627 if (!map) {
5628 free_extent_map(em);
5629 return -ENOMEM;
5630 }
5631
5632 em->bdev = (struct block_device *)map;
5633 em->start = logical;
5634 em->len = length;
5635 em->orig_start = 0;
5636 em->block_start = 0;
5637 em->block_len = em->len;
5638
5639 map->num_stripes = num_stripes;
5640 map->io_width = btrfs_chunk_io_width(leaf, chunk);
5641 map->io_align = btrfs_chunk_io_align(leaf, chunk);
5642 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
5643 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
5644 map->type = btrfs_chunk_type(leaf, chunk);
5645 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
5646 for (i = 0; i < num_stripes; i++) {
5647 map->stripes[i].physical =
5648 btrfs_stripe_offset_nr(leaf, chunk, i);
5649 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
5650 read_extent_buffer(leaf, uuid, (unsigned long)
5651 btrfs_stripe_dev_uuid_nr(chunk, i),
5652 BTRFS_UUID_SIZE);
5653 map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
5654 uuid, NULL);
5655 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
5656 kfree(map);
5657 free_extent_map(em);
5658 return -EIO;
5659 }
5660 if (!map->stripes[i].dev) {
5661 map->stripes[i].dev =
5662 add_missing_dev(root, devid, uuid);
5663 if (!map->stripes[i].dev) {
5664 kfree(map);
5665 free_extent_map(em);
5666 return -EIO;
5667 }
5668 }
5669 map->stripes[i].dev->in_fs_metadata = 1;
5670 }
5671
5672 write_lock(&map_tree->map_tree.lock);
5673 ret = add_extent_mapping(&map_tree->map_tree, em, 0);
5674 write_unlock(&map_tree->map_tree.lock);
5675 BUG_ON(ret); /* Tree corruption */
5676 free_extent_map(em);
5677
5678 return 0;
5679 }
5680
5681 static void fill_device_from_item(struct extent_buffer *leaf,
5682 struct btrfs_dev_item *dev_item,
5683 struct btrfs_device *device)
5684 {
5685 unsigned long ptr;
5686
5687 device->devid = btrfs_device_id(leaf, dev_item);
5688 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
5689 device->total_bytes = device->disk_total_bytes;
5690 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
5691 device->type = btrfs_device_type(leaf, dev_item);
5692 device->io_align = btrfs_device_io_align(leaf, dev_item);
5693 device->io_width = btrfs_device_io_width(leaf, dev_item);
5694 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
5695 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
5696 device->is_tgtdev_for_dev_replace = 0;
5697
5698 ptr = (unsigned long)btrfs_device_uuid(dev_item);
5699 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
5700 }
5701
5702 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
5703 {
5704 struct btrfs_fs_devices *fs_devices;
5705 int ret;
5706
5707 BUG_ON(!mutex_is_locked(&uuid_mutex));
5708
5709 fs_devices = root->fs_info->fs_devices->seed;
5710 while (fs_devices) {
5711 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5712 ret = 0;
5713 goto out;
5714 }
5715 fs_devices = fs_devices->seed;
5716 }
5717
5718 fs_devices = find_fsid(fsid);
5719 if (!fs_devices) {
5720 ret = -ENOENT;
5721 goto out;
5722 }
5723
5724 fs_devices = clone_fs_devices(fs_devices);
5725 if (IS_ERR(fs_devices)) {
5726 ret = PTR_ERR(fs_devices);
5727 goto out;
5728 }
5729
5730 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
5731 root->fs_info->bdev_holder);
5732 if (ret) {
5733 free_fs_devices(fs_devices);
5734 goto out;
5735 }
5736
5737 if (!fs_devices->seeding) {
5738 __btrfs_close_devices(fs_devices);
5739 free_fs_devices(fs_devices);
5740 ret = -EINVAL;
5741 goto out;
5742 }
5743
5744 fs_devices->seed = root->fs_info->fs_devices->seed;
5745 root->fs_info->fs_devices->seed = fs_devices;
5746 out:
5747 return ret;
5748 }
5749
5750 static int read_one_dev(struct btrfs_root *root,
5751 struct extent_buffer *leaf,
5752 struct btrfs_dev_item *dev_item)
5753 {
5754 struct btrfs_device *device;
5755 u64 devid;
5756 int ret;
5757 u8 fs_uuid[BTRFS_UUID_SIZE];
5758 u8 dev_uuid[BTRFS_UUID_SIZE];
5759
5760 devid = btrfs_device_id(leaf, dev_item);
5761 read_extent_buffer(leaf, dev_uuid,
5762 (unsigned long)btrfs_device_uuid(dev_item),
5763 BTRFS_UUID_SIZE);
5764 read_extent_buffer(leaf, fs_uuid,
5765 (unsigned long)btrfs_device_fsid(dev_item),
5766 BTRFS_UUID_SIZE);
5767
5768 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
5769 ret = open_seed_devices(root, fs_uuid);
5770 if (ret && !btrfs_test_opt(root, DEGRADED))
5771 return ret;
5772 }
5773
5774 device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
5775 if (!device || !device->bdev) {
5776 if (!btrfs_test_opt(root, DEGRADED))
5777 return -EIO;
5778
5779 if (!device) {
5780 btrfs_warn(root->fs_info, "devid %llu missing",
5781 (unsigned long long)devid);
5782 device = add_missing_dev(root, devid, dev_uuid);
5783 if (!device)
5784 return -ENOMEM;
5785 } else if (!device->missing) {
5786 /*
5787 * this happens when a device that was properly set up
5788 * in the device info lists suddenly goes bad.
5789 * device->bdev is NULL, and so we have to set
5790 * device->missing to one here
5791 */
5792 root->fs_info->fs_devices->missing_devices++;
5793 device->missing = 1;
5794 }
5795 }
5796
5797 if (device->fs_devices != root->fs_info->fs_devices) {
5798 BUG_ON(device->writeable);
5799 if (device->generation !=
5800 btrfs_device_generation(leaf, dev_item))
5801 return -EINVAL;
5802 }
5803
5804 fill_device_from_item(leaf, dev_item, device);
5805 device->in_fs_metadata = 1;
5806 if (device->writeable && !device->is_tgtdev_for_dev_replace) {
5807 device->fs_devices->total_rw_bytes += device->total_bytes;
5808 spin_lock(&root->fs_info->free_chunk_lock);
5809 root->fs_info->free_chunk_space += device->total_bytes -
5810 device->bytes_used;
5811 spin_unlock(&root->fs_info->free_chunk_lock);
5812 }
5813 ret = 0;
5814 return ret;
5815 }
5816
5817 int btrfs_read_sys_array(struct btrfs_root *root)
5818 {
5819 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
5820 struct extent_buffer *sb;
5821 struct btrfs_disk_key *disk_key;
5822 struct btrfs_chunk *chunk;
5823 u8 *ptr;
5824 unsigned long sb_ptr;
5825 int ret = 0;
5826 u32 num_stripes;
5827 u32 array_size;
5828 u32 len = 0;
5829 u32 cur;
5830 struct btrfs_key key;
5831
5832 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
5833 BTRFS_SUPER_INFO_SIZE);
5834 if (!sb)
5835 return -ENOMEM;
5836 btrfs_set_buffer_uptodate(sb);
5837 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
5838 /*
5839 * The sb extent buffer is artificial and just used to read the system array.
5840 * The btrfs_set_buffer_uptodate() call does not properly mark all its
5841 * pages up-to-date when the page is larger: extent does not cover the
5842 * whole page and consequently check_page_uptodate does not find all
5843 * the page's extents up-to-date (the hole beyond sb),
5844 * write_extent_buffer then triggers a WARN_ON.
5845 *
5846 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
5847 * but sb spans only this function. Add an explicit SetPageUptodate call
5848 * to silence the warning eg. on PowerPC 64.
5849 */
5850 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
5851 SetPageUptodate(sb->pages[0]);
5852
5853 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
5854 array_size = btrfs_super_sys_array_size(super_copy);
5855
5856 ptr = super_copy->sys_chunk_array;
5857 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
5858 cur = 0;
5859
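/*
 * The array is a packed sequence of (struct btrfs_disk_key,
 * struct btrfs_chunk + stripes) pairs. The chunk item size depends
 * on the stripe count, so 'len' is recomputed from
 * btrfs_chunk_num_stripes() on every iteration.
 */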
5860 while (cur < array_size) {
5861 disk_key = (struct btrfs_disk_key *)ptr;
5862 btrfs_disk_key_to_cpu(&key, disk_key);
5863
5864 len = sizeof(*disk_key);
ptr += len;
5865 sb_ptr += len;
5866 cur += len;
5867
5868 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
5869 chunk = (struct btrfs_chunk *)sb_ptr;
5870 ret = read_one_chunk(root, &key, sb, chunk);
5871 if (ret)
5872 break;
5873 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
5874 len = btrfs_chunk_item_size(num_stripes);
5875 } else {
5876 ret = -EIO;
5877 break;
5878 }
5879 ptr += len;
5880 sb_ptr += len;
5881 cur += len;
5882 }
5883 free_extent_buffer(sb);
5884 return ret;
5885 }
5886
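/*
 * Populate the device and chunk mapping state from the chunk tree at
 * mount time: all dev items first, then all chunk items.
 */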
5887 int btrfs_read_chunk_tree(struct btrfs_root *root)
5888 {
5889 struct btrfs_path *path;
5890 struct extent_buffer *leaf;
5891 struct btrfs_key key;
5892 struct btrfs_key found_key;
5893 int ret;
5894 int slot;
5895
5896 root = root->fs_info->chunk_root;
5897
5898 path = btrfs_alloc_path();
5899 if (!path)
5900 return -ENOMEM;
5901
5902 mutex_lock(&uuid_mutex);
5903 lock_chunks(root);
5904
5905 /*
5906 * Read all device items, and then all the chunk items. All
5907 * device items are found before any chunk item (their object id
5908 * is smaller than the lowest possible object id for a chunk
5909 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
5910 */
5911 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
5912 key.offset = 0;
5913 key.type = 0;
5914 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5915 if (ret < 0)
5916 goto error;
5917 while (1) {
5918 leaf = path->nodes[0];
5919 slot = path->slots[0];
5920 if (slot >= btrfs_header_nritems(leaf)) {
5921 ret = btrfs_next_leaf(root, path);
5922 if (ret == 0)
5923 continue;
5924 if (ret < 0)
5925 goto error;
5926 break;
5927 }
5928 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5929 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
5930 struct btrfs_dev_item *dev_item;
5931 dev_item = btrfs_item_ptr(leaf, slot,
5932 struct btrfs_dev_item);
5933 ret = read_one_dev(root, leaf, dev_item);
5934 if (ret)
5935 goto error;
5936 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
5937 struct btrfs_chunk *chunk;
5938 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
5939 ret = read_one_chunk(root, &found_key, leaf, chunk);
5940 if (ret)
5941 goto error;
5942 }
5943 path->slots[0]++;
5944 }
5945 ret = 0;
5946 error:
5947 unlock_chunks(root);
5948 mutex_unlock(&uuid_mutex);
5949
5950 btrfs_free_path(path);
5951 return ret;
5952 }
5953
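/*
 * Called once the dev root has been read: devices are discovered
 * earlier than that, so their dev_root pointers are filled in late.
 */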
5954 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
5955 {
5956 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
5957 struct btrfs_device *device;
5958
5959 mutex_lock(&fs_devices->device_list_mutex);
5960 list_for_each_entry(device, &fs_devices->devices, dev_list)
5961 device->dev_root = fs_info->dev_root;
5962 mutex_unlock(&fs_devices->device_list_mutex);
5963 }
5964
5965 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
5966 {
5967 int i;
5968
5969 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5970 btrfs_dev_stat_reset(dev, i);
5971 }
5972
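/*
 * Load the persistent error counters of every device from the device
 * tree. Devices without a dev_stats item simply start from zero.
 */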
5973 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
5974 {
5975 struct btrfs_key key;
5976 struct btrfs_key found_key;
5977 struct btrfs_root *dev_root = fs_info->dev_root;
5978 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
5979 struct extent_buffer *eb;
5980 int slot;
5981 int ret = 0;
5982 struct btrfs_device *device;
5983 struct btrfs_path *path = NULL;
5984 int i;
5985
5986 path = btrfs_alloc_path();
5987 if (!path) {
5988 ret = -ENOMEM;
5989 goto out;
5990 }
5991
5992 mutex_lock(&fs_devices->device_list_mutex);
5993 list_for_each_entry(device, &fs_devices->devices, dev_list) {
5994 int item_size;
5995 struct btrfs_dev_stats_item *ptr;
5996
5997 key.objectid = 0;
5998 key.type = BTRFS_DEV_STATS_KEY;
5999 key.offset = device->devid;
6000 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
6001 if (ret) {
6002 __btrfs_reset_dev_stats(device);
6003 device->dev_stats_valid = 1;
6004 btrfs_release_path(path);
6005 continue;
6006 }
6007 slot = path->slots[0];
6008 eb = path->nodes[0];
6009 btrfs_item_key_to_cpu(eb, &found_key, slot);
6010 item_size = btrfs_item_size_nr(eb, slot);
6011
6012 ptr = btrfs_item_ptr(eb, slot,
6013 struct btrfs_dev_stats_item);
6014
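/*
 * The item is an array of __le64 counters; tolerate a shorter
 * on-disk item (e.g. from an older format) by zeroing whatever
 * counters it does not cover.
 */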
6015 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6016 if (item_size >= (1 + i) * sizeof(__le64))
6017 btrfs_dev_stat_set(device, i,
6018 btrfs_dev_stats_value(eb, ptr, i));
6019 else
6020 btrfs_dev_stat_reset(device, i);
6021 }
6022
6023 device->dev_stats_valid = 1;
6024 btrfs_dev_stat_print_on_load(device);
6025 btrfs_release_path(path);
6026 }
6027 mutex_unlock(&fs_devices->device_list_mutex);
6028
6029 out:
6030 btrfs_free_path(path);
6031 return ret < 0 ? ret : 0;
6032 }
6033
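/*
 * Write the in-memory counters of one device back into its dev_stats
 * item, replacing an existing item that is too small to hold them all.
 */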
6034 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
6035 struct btrfs_root *dev_root,
6036 struct btrfs_device *device)
6037 {
6038 struct btrfs_path *path;
6039 struct btrfs_key key;
6040 struct extent_buffer *eb;
6041 struct btrfs_dev_stats_item *ptr;
6042 int ret;
6043 int i;
6044
6045 key.objectid = 0;
6046 key.type = BTRFS_DEV_STATS_KEY;
6047 key.offset = device->devid;
6048
6049 path = btrfs_alloc_path();
6050 if (!path)
return -ENOMEM;
6051 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
6052 if (ret < 0) {
6053 printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
6054 ret, rcu_str_deref(device->name));
6055 goto out;
6056 }
6057
6058 if (ret == 0 &&
6059 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
6060 /* need to delete old one and insert a new one */
6061 ret = btrfs_del_item(trans, dev_root, path);
6062 if (ret != 0) {
6063 printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
6064 rcu_str_deref(device->name), ret);
6065 goto out;
6066 }
6067 ret = 1;
6068 }
6069
6070 if (ret == 1) {
6071 /* need to insert a new item */
6072 btrfs_release_path(path);
6073 ret = btrfs_insert_empty_item(trans, dev_root, path,
6074 &key, sizeof(*ptr));
6075 if (ret < 0) {
6076 printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
6077 rcu_str_deref(device->name), ret);
6078 goto out;
6079 }
6080 }
6081
6082 eb = path->nodes[0];
6083 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
6084 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6085 btrfs_set_dev_stats_value(eb, ptr, i,
6086 btrfs_dev_stat_read(device, i));
6087 btrfs_mark_buffer_dirty(eb);
6088
6089 out:
6090 btrfs_free_path(path);
6091 return ret;
6092 }
6093
6094 /*
6095 * called from commit_transaction. Writes all changed device stats to disk.
6096 */
6097 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
6098 struct btrfs_fs_info *fs_info)
6099 {
6100 struct btrfs_root *dev_root = fs_info->dev_root;
6101 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6102 struct btrfs_device *device;
6103 int ret = 0;
6104
6105 mutex_lock(&fs_devices->device_list_mutex);
6106 list_for_each_entry(device, &fs_devices->devices, dev_list) {
6107 if (!device->dev_stats_valid || !device->dev_stats_dirty)
6108 continue;
6109
6110 ret = update_dev_stat_item(trans, dev_root, device);
6111 if (!ret)
6112 device->dev_stats_dirty = 0;
6113 }
6114 mutex_unlock(&fs_devices->device_list_mutex);
6115
6116 return ret;
6117 }
6118
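/*
 * Bump one error counter and emit the ratelimited per-device summary.
 */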
6119 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
6120 {
6121 btrfs_dev_stat_inc(dev, index);
6122 btrfs_dev_stat_print_on_error(dev);
6123 }
6124
6125 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
6126 {
6127 if (!dev->dev_stats_valid)
6128 return;
6129 printk_ratelimited_in_rcu(KERN_ERR
6130 "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
6131 rcu_str_deref(dev->name),
6132 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6133 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6134 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6135 btrfs_dev_stat_read(dev,
6136 BTRFS_DEV_STAT_CORRUPTION_ERRS),
6137 btrfs_dev_stat_read(dev,
6138 BTRFS_DEV_STAT_GENERATION_ERRS));
6139 }
6140
6141 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
6142 {
6143 int i;
6144
6145 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6146 if (btrfs_dev_stat_read(dev, i) != 0)
6147 break;
6148 if (i == BTRFS_DEV_STAT_VALUES_MAX)
6149 return; /* all values == 0, suppress message */
6150
6151 printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
6152 rcu_str_deref(dev->name),
6153 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6154 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6155 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6156 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6157 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6158 }
6159
6160 int btrfs_get_dev_stats(struct btrfs_root *root,
6161 struct btrfs_ioctl_get_dev_stats *stats)
6162 {
6163 struct btrfs_device *dev;
6164 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6165 int i;
6166
6167 mutex_lock(&fs_devices->device_list_mutex);
6168 dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
6169 mutex_unlock(&fs_devices->device_list_mutex);
6170
6171 if (!dev) {
6172 printk(KERN_WARNING
6173 "btrfs: get dev_stats failed, device not found\n");
6174 return -ENODEV;
6175 } else if (!dev->dev_stats_valid) {
6176 printk(KERN_WARNING
6177 "btrfs: get dev_stats failed, not yet valid\n");
6178 return -ENODEV;
6179 } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
6180 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6181 if (stats->nr_items > i)
6182 stats->values[i] =
6183 btrfs_dev_stat_read_and_reset(dev, i);
6184 else
6185 btrfs_dev_stat_reset(dev, i);
6186 }
6187 } else {
6188 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6189 if (stats->nr_items > i)
6190 stats->values[i] = btrfs_dev_stat_read(dev, i);
6191 }
6192 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
6193 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
6194 return 0;
6195 }
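
/*
 * A minimal sketch of how userspace drives the function above, assuming
 * BTRFS_IOC_GET_DEV_STATS and struct btrfs_ioctl_get_dev_stats as
 * declared in the btrfs ioctl header, with error handling omitted:
 *
 *	struct btrfs_ioctl_get_dev_stats args;
 *	int fd = open("/mnt", O_RDONLY);  (any path on the filesystem)
 *
 *	memset(&args, 0, sizeof(args));
 *	args.devid = 1;
 *	args.nr_items = BTRFS_DEV_STAT_VALUES_MAX;
 *	args.flags = 0;  (or BTRFS_DEV_STATS_RESET to clear the counters)
 *	if (ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &args) == 0)
 *		printf("wr errs: %llu\n", (unsigned long long)
 *		       args.values[BTRFS_DEV_STAT_WRITE_ERRS]);
 */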
6196
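/*
 * Wipe the magic of the device's primary super block so it is no
 * longer recognized as a btrfs device, e.g. after device removal or
 * replace.
 */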
6197 int btrfs_scratch_superblock(struct btrfs_device *device)
6198 {
6199 struct buffer_head *bh;
6200 struct btrfs_super_block *disk_super;
6201
6202 bh = btrfs_read_dev_super(device->bdev);
6203 if (!bh)
6204 return -EINVAL;
6205 disk_super = (struct btrfs_super_block *)bh->b_data;
6206
6207 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
6208 set_buffer_dirty(bh);
6209 sync_dirty_buffer(bh);
6210 brelse(bh);
6211
6212 return 0;
6213 }