Btrfs: fix a build warning for an unused label
fs/btrfs/volumes.c
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17 */
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/slab.h>
21 #include <linux/buffer_head.h>
22 #include <linux/blkdev.h>
23 #include <linux/random.h>
24 #include <linux/iocontext.h>
25 #include <linux/capability.h>
26 #include <linux/ratelimit.h>
27 #include <linux/kthread.h>
28 #include "compat.h"
29 #include "ctree.h"
30 #include "extent_map.h"
31 #include "disk-io.h"
32 #include "transaction.h"
33 #include "print-tree.h"
34 #include "volumes.h"
35 #include "async-thread.h"
36 #include "check-integrity.h"
37 #include "rcu-string.h"
38 #include "math.h"
39 #include "dev-replace.h"
40
41 static int init_first_rw_device(struct btrfs_trans_handle *trans,
42 struct btrfs_root *root,
43 struct btrfs_device *device);
44 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
45 static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
46 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
47
48 static DEFINE_MUTEX(uuid_mutex);
49 static LIST_HEAD(fs_uuids);
50
51 static void lock_chunks(struct btrfs_root *root)
52 {
53 mutex_lock(&root->fs_info->chunk_mutex);
54 }
55
56 static void unlock_chunks(struct btrfs_root *root)
57 {
58 mutex_unlock(&root->fs_info->chunk_mutex);
59 }
60
61 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
62 {
63 struct btrfs_device *device;
64 WARN_ON(fs_devices->opened);
65 while (!list_empty(&fs_devices->devices)) {
66 device = list_entry(fs_devices->devices.next,
67 struct btrfs_device, dev_list);
68 list_del(&device->dev_list);
69 rcu_string_free(device->name);
70 kfree(device);
71 }
72 kfree(fs_devices);
73 }
74
75 void btrfs_cleanup_fs_uuids(void)
76 {
77 struct btrfs_fs_devices *fs_devices;
78
79 while (!list_empty(&fs_uuids)) {
80 fs_devices = list_entry(fs_uuids.next,
81 struct btrfs_fs_devices, list);
82 list_del(&fs_devices->list);
83 free_fs_devices(fs_devices);
84 }
85 }
86
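/*
 * Look up a device by devid in the given device list.  If @uuid is
 * non-NULL it must match the device uuid as well; returns NULL when no
 * matching device is found.
 */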
87 static noinline struct btrfs_device *__find_device(struct list_head *head,
88 u64 devid, u8 *uuid)
89 {
90 struct btrfs_device *dev;
91
92 list_for_each_entry(dev, head, dev_list) {
93 if (dev->devid == devid &&
94 (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
95 return dev;
96 }
97 }
98 return NULL;
99 }
100
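/*
 * Find the in-memory btrfs_fs_devices entry whose fsid matches @fsid,
 * or NULL if this filesystem has not been scanned before.
 */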
101 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
102 {
103 struct btrfs_fs_devices *fs_devices;
104
105 list_for_each_entry(fs_devices, &fs_uuids, list) {
106 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
107 return fs_devices;
108 }
109 return NULL;
110 }
111
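/*
 * Open the block device at @device_path, optionally flush its page cache,
 * set the block size to 4K and read the btrfs super block into *bh.
 * On failure any opened device is released and *bdev/*bh are cleared.
 */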
112 static int
113 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
114 int flush, struct block_device **bdev,
115 struct buffer_head **bh)
116 {
117 int ret;
118
119 *bdev = blkdev_get_by_path(device_path, flags, holder);
120
121 if (IS_ERR(*bdev)) {
122 ret = PTR_ERR(*bdev);
123 printk(KERN_INFO "btrfs: open %s failed\n", device_path);
124 goto error;
125 }
126
127 if (flush)
128 filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
129 ret = set_blocksize(*bdev, 4096);
130 if (ret) {
131 blkdev_put(*bdev, flags);
132 goto error;
133 }
134 invalidate_bdev(*bdev);
135 *bh = btrfs_read_dev_super(*bdev);
136 if (!*bh) {
137 ret = -EINVAL;
138 blkdev_put(*bdev, flags);
139 goto error;
140 }
141
142 return 0;
143
144 error:
145 *bdev = NULL;
146 *bh = NULL;
147 return ret;
148 }
149
150 static void requeue_list(struct btrfs_pending_bios *pending_bios,
151 struct bio *head, struct bio *tail)
152 {
153
154 struct bio *old_head;
155
156 old_head = pending_bios->head;
157 pending_bios->head = head;
158 if (pending_bios->tail)
159 tail->bi_next = old_head;
160 else
161 pending_bios->tail = tail;
162 }
163
164 /*
165 * we try to collect pending bios for a device so we don't get a large
166 * number of procs sending bios down to the same device. This greatly
167  * improves the scheduler's ability to collect and merge the bios.
168 *
169 * But, it also turns into a long list of bios to process and that is sure
170 * to eventually make the worker thread block. The solution here is to
171 * make some progress and then put this work struct back at the end of
172 * the list if the block device is congested. This way, multiple devices
173 * can make progress from a single worker thread.
174 */
175 static noinline void run_scheduled_bios(struct btrfs_device *device)
176 {
177 struct bio *pending;
178 struct backing_dev_info *bdi;
179 struct btrfs_fs_info *fs_info;
180 struct btrfs_pending_bios *pending_bios;
181 struct bio *tail;
182 struct bio *cur;
183 int again = 0;
184 unsigned long num_run;
185 unsigned long batch_run = 0;
186 unsigned long limit;
187 unsigned long last_waited = 0;
188 int force_reg = 0;
189 int sync_pending = 0;
190 struct blk_plug plug;
191
192 /*
193 * this function runs all the bios we've collected for
194 * a particular device. We don't want to wander off to
195 * another device without first sending all of these down.
196  * So, set up a plug here and finish it off before we return
197 */
198 blk_start_plug(&plug);
199
200 bdi = blk_get_backing_dev_info(device->bdev);
201 fs_info = device->dev_root->fs_info;
202 limit = btrfs_async_submit_limit(fs_info);
203 limit = limit * 2 / 3;
204
205 loop:
206 spin_lock(&device->io_lock);
207
208 loop_lock:
209 num_run = 0;
210
211 /* take all the bios off the list at once and process them
212 * later on (without the lock held). But, remember the
213 * tail and other pointers so the bios can be properly reinserted
214 * into the list if we hit congestion
215 */
216 if (!force_reg && device->pending_sync_bios.head) {
217 pending_bios = &device->pending_sync_bios;
218 force_reg = 1;
219 } else {
220 pending_bios = &device->pending_bios;
221 force_reg = 0;
222 }
223
224 pending = pending_bios->head;
225 tail = pending_bios->tail;
226 WARN_ON(pending && !tail);
227
228 /*
229 * if pending was null this time around, no bios need processing
230 * at all and we can stop. Otherwise it'll loop back up again
231 * and do an additional check so no bios are missed.
232 *
233 * device->running_pending is used to synchronize with the
234 * schedule_bio code.
235 */
236 if (device->pending_sync_bios.head == NULL &&
237 device->pending_bios.head == NULL) {
238 again = 0;
239 device->running_pending = 0;
240 } else {
241 again = 1;
242 device->running_pending = 1;
243 }
244
245 pending_bios->head = NULL;
246 pending_bios->tail = NULL;
247
248 spin_unlock(&device->io_lock);
249
250 while (pending) {
251
252 rmb();
253 /* we want to work on both lists, but do more bios on the
254 * sync list than the regular list
255 */
256 if ((num_run > 32 &&
257 pending_bios != &device->pending_sync_bios &&
258 device->pending_sync_bios.head) ||
259 (num_run > 64 && pending_bios == &device->pending_sync_bios &&
260 device->pending_bios.head)) {
261 spin_lock(&device->io_lock);
262 requeue_list(pending_bios, pending, tail);
263 goto loop_lock;
264 }
265
266 cur = pending;
267 pending = pending->bi_next;
268 cur->bi_next = NULL;
269
270 if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
271 waitqueue_active(&fs_info->async_submit_wait))
272 wake_up(&fs_info->async_submit_wait);
273
274 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
275
276 /*
277 * if we're doing the sync list, record that our
278 * plug has some sync requests on it
279 *
280 * If we're doing the regular list and there are
281 * sync requests sitting around, unplug before
282 * we add more
283 */
284 if (pending_bios == &device->pending_sync_bios) {
285 sync_pending = 1;
286 } else if (sync_pending) {
287 blk_finish_plug(&plug);
288 blk_start_plug(&plug);
289 sync_pending = 0;
290 }
291
292 btrfsic_submit_bio(cur->bi_rw, cur);
293 num_run++;
294 batch_run++;
295 if (need_resched())
296 cond_resched();
297
298 /*
299 * we made progress, there is more work to do and the bdi
300 * is now congested. Back off and let other work structs
301 * run instead
302 */
303 if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
304 fs_info->fs_devices->open_devices > 1) {
305 struct io_context *ioc;
306
307 ioc = current->io_context;
308
309 /*
310 * the main goal here is that we don't want to
311 * block if we're going to be able to submit
312 * more requests without blocking.
313 *
314  * This code does two great things: it pokes into
315 * the elevator code from a filesystem _and_
316 * it makes assumptions about how batching works.
317 */
318 if (ioc && ioc->nr_batch_requests > 0 &&
319 time_before(jiffies, ioc->last_waited + HZ/50UL) &&
320 (last_waited == 0 ||
321 ioc->last_waited == last_waited)) {
322 /*
323 * we want to go through our batch of
324 * requests and stop. So, we copy out
325 * the ioc->last_waited time and test
326 * against it before looping
327 */
328 last_waited = ioc->last_waited;
329 if (need_resched())
330 cond_resched();
331 continue;
332 }
333 spin_lock(&device->io_lock);
334 requeue_list(pending_bios, pending, tail);
335 device->running_pending = 1;
336
337 spin_unlock(&device->io_lock);
338 btrfs_requeue_work(&device->work);
339 goto done;
340 }
341 /* unplug every 64 requests just for good measure */
342 if (batch_run % 64 == 0) {
343 blk_finish_plug(&plug);
344 blk_start_plug(&plug);
345 sync_pending = 0;
346 }
347 }
348
349 cond_resched();
350 if (again)
351 goto loop;
352
353 spin_lock(&device->io_lock);
354 if (device->pending_bios.head || device->pending_sync_bios.head)
355 goto loop_lock;
356 spin_unlock(&device->io_lock);
357
358 done:
359 blk_finish_plug(&plug);
360 }
361
362 static void pending_bios_fn(struct btrfs_work *work)
363 {
364 struct btrfs_device *device;
365
366 device = container_of(work, struct btrfs_device, work);
367 run_scheduled_bios(device);
368 }
369
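/*
 * Record a scanned device in the in-memory device lists.  A previously
 * unseen fsid gets a fresh btrfs_fs_devices entry; a device that is
 * already known only has its path refreshed.  Returns -EBUSY if a new
 * device shows up for a filesystem that is already open.
 */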
370 static noinline int device_list_add(const char *path,
371 struct btrfs_super_block *disk_super,
372 u64 devid, struct btrfs_fs_devices **fs_devices_ret)
373 {
374 struct btrfs_device *device;
375 struct btrfs_fs_devices *fs_devices;
376 struct rcu_string *name;
377 u64 found_transid = btrfs_super_generation(disk_super);
378
379 fs_devices = find_fsid(disk_super->fsid);
380 if (!fs_devices) {
381 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
382 if (!fs_devices)
383 return -ENOMEM;
384 INIT_LIST_HEAD(&fs_devices->devices);
385 INIT_LIST_HEAD(&fs_devices->alloc_list);
386 list_add(&fs_devices->list, &fs_uuids);
387 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
388 fs_devices->latest_devid = devid;
389 fs_devices->latest_trans = found_transid;
390 mutex_init(&fs_devices->device_list_mutex);
391 device = NULL;
392 } else {
393 device = __find_device(&fs_devices->devices, devid,
394 disk_super->dev_item.uuid);
395 }
396 if (!device) {
397 if (fs_devices->opened)
398 return -EBUSY;
399
400 device = kzalloc(sizeof(*device), GFP_NOFS);
401 if (!device) {
402 /* we can safely leave the fs_devices entry around */
403 return -ENOMEM;
404 }
405 device->devid = devid;
406 device->dev_stats_valid = 0;
407 device->work.func = pending_bios_fn;
408 memcpy(device->uuid, disk_super->dev_item.uuid,
409 BTRFS_UUID_SIZE);
410 spin_lock_init(&device->io_lock);
411
412 name = rcu_string_strdup(path, GFP_NOFS);
413 if (!name) {
414 kfree(device);
415 return -ENOMEM;
416 }
417 rcu_assign_pointer(device->name, name);
418 INIT_LIST_HEAD(&device->dev_alloc_list);
419
420 /* init readahead state */
421 spin_lock_init(&device->reada_lock);
422 device->reada_curr_zone = NULL;
423 atomic_set(&device->reada_in_flight, 0);
424 device->reada_next = 0;
425 INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
426 INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);
427
428 mutex_lock(&fs_devices->device_list_mutex);
429 list_add_rcu(&device->dev_list, &fs_devices->devices);
430 mutex_unlock(&fs_devices->device_list_mutex);
431
432 device->fs_devices = fs_devices;
433 fs_devices->num_devices++;
434 } else if (!device->name || strcmp(device->name->str, path)) {
435 name = rcu_string_strdup(path, GFP_NOFS);
436 if (!name)
437 return -ENOMEM;
438 rcu_string_free(device->name);
439 rcu_assign_pointer(device->name, name);
440 if (device->missing) {
441 fs_devices->missing_devices--;
442 device->missing = 0;
443 }
444 }
445
446 if (found_transid > fs_devices->latest_trans) {
447 fs_devices->latest_devid = devid;
448 fs_devices->latest_trans = found_transid;
449 }
450 *fs_devices_ret = fs_devices;
451 return 0;
452 }
453
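/*
 * Duplicate @orig and all of its devices (paths, devids, uuids).
 * btrfs_prepare_sprout() uses the clone so the list of scanned
 * filesystems keeps an entry describing the seed devices.
 */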
454 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
455 {
456 struct btrfs_fs_devices *fs_devices;
457 struct btrfs_device *device;
458 struct btrfs_device *orig_dev;
459
460 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
461 if (!fs_devices)
462 return ERR_PTR(-ENOMEM);
463
464 INIT_LIST_HEAD(&fs_devices->devices);
465 INIT_LIST_HEAD(&fs_devices->alloc_list);
466 INIT_LIST_HEAD(&fs_devices->list);
467 mutex_init(&fs_devices->device_list_mutex);
468 fs_devices->latest_devid = orig->latest_devid;
469 fs_devices->latest_trans = orig->latest_trans;
470 fs_devices->total_devices = orig->total_devices;
471 memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
472
473 	/* We hold the volume lock, so it is safe to get the devices. */
474 list_for_each_entry(orig_dev, &orig->devices, dev_list) {
475 struct rcu_string *name;
476
477 device = kzalloc(sizeof(*device), GFP_NOFS);
478 if (!device)
479 goto error;
480
481 /*
482 		 * This is ok to do without the rcu read lock held because we hold the
483 * uuid mutex so nothing we touch in here is going to disappear.
484 */
485 name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
486 if (!name) {
487 kfree(device);
488 goto error;
489 }
490 rcu_assign_pointer(device->name, name);
491
492 device->devid = orig_dev->devid;
493 device->work.func = pending_bios_fn;
494 memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
495 spin_lock_init(&device->io_lock);
496 INIT_LIST_HEAD(&device->dev_list);
497 INIT_LIST_HEAD(&device->dev_alloc_list);
498
499 list_add(&device->dev_list, &fs_devices->devices);
500 device->fs_devices = fs_devices;
501 fs_devices->num_devices++;
502 }
503 return fs_devices;
504 error:
505 free_fs_devices(fs_devices);
506 return ERR_PTR(-ENOMEM);
507 }
508
509 void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
510 struct btrfs_fs_devices *fs_devices, int step)
511 {
512 struct btrfs_device *device, *next;
513
514 struct block_device *latest_bdev = NULL;
515 u64 latest_devid = 0;
516 u64 latest_transid = 0;
517
518 mutex_lock(&uuid_mutex);
519 again:
520 	/* This is the initialized path; it is safe to release the devices. */
521 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
522 if (device->in_fs_metadata) {
523 if (!device->is_tgtdev_for_dev_replace &&
524 (!latest_transid ||
525 device->generation > latest_transid)) {
526 latest_devid = device->devid;
527 latest_transid = device->generation;
528 latest_bdev = device->bdev;
529 }
530 continue;
531 }
532
533 if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
534 /*
535 * In the first step, keep the device which has
536 * the correct fsid and the devid that is used
537 * for the dev_replace procedure.
538 * In the second step, the dev_replace state is
539 * read from the device tree and it is known
540 * whether the procedure is really active or
541 * not, which means whether this device is
542 * used or whether it should be removed.
543 */
544 if (step == 0 || device->is_tgtdev_for_dev_replace) {
545 continue;
546 }
547 }
548 if (device->bdev) {
549 blkdev_put(device->bdev, device->mode);
550 device->bdev = NULL;
551 fs_devices->open_devices--;
552 }
553 if (device->writeable) {
554 list_del_init(&device->dev_alloc_list);
555 device->writeable = 0;
556 if (!device->is_tgtdev_for_dev_replace)
557 fs_devices->rw_devices--;
558 }
559 list_del_init(&device->dev_list);
560 fs_devices->num_devices--;
561 rcu_string_free(device->name);
562 kfree(device);
563 }
564
565 if (fs_devices->seed) {
566 fs_devices = fs_devices->seed;
567 goto again;
568 }
569
570 fs_devices->latest_bdev = latest_bdev;
571 fs_devices->latest_devid = latest_devid;
572 fs_devices->latest_trans = latest_transid;
573
574 mutex_unlock(&uuid_mutex);
575 }
576
577 static void __free_device(struct work_struct *work)
578 {
579 struct btrfs_device *device;
580
581 device = container_of(work, struct btrfs_device, rcu_work);
582
583 if (device->bdev)
584 blkdev_put(device->bdev, device->mode);
585
586 rcu_string_free(device->name);
587 kfree(device);
588 }
589
590 static void free_device(struct rcu_head *head)
591 {
592 struct btrfs_device *device;
593
594 device = container_of(head, struct btrfs_device, rcu);
595
596 INIT_WORK(&device->rcu_work, __free_device);
597 schedule_work(&device->rcu_work);
598 }
599
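/*
 * Drop one open reference on @fs_devices.  When the last reference is
 * gone, close every block device and replace each btrfs_device on the
 * list with a bare copy that is freed via RCU once readers are done.
 */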
600 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
601 {
602 struct btrfs_device *device;
603
604 if (--fs_devices->opened > 0)
605 return 0;
606
607 mutex_lock(&fs_devices->device_list_mutex);
608 list_for_each_entry(device, &fs_devices->devices, dev_list) {
609 struct btrfs_device *new_device;
610 struct rcu_string *name;
611
612 if (device->bdev)
613 fs_devices->open_devices--;
614
615 if (device->writeable && !device->is_tgtdev_for_dev_replace) {
616 list_del_init(&device->dev_alloc_list);
617 fs_devices->rw_devices--;
618 }
619
620 if (device->can_discard)
621 fs_devices->num_can_discard--;
622
623 new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
624 BUG_ON(!new_device); /* -ENOMEM */
625 memcpy(new_device, device, sizeof(*new_device));
626
627 /* Safe because we are under uuid_mutex */
628 if (device->name) {
629 name = rcu_string_strdup(device->name->str, GFP_NOFS);
630 BUG_ON(device->name && !name); /* -ENOMEM */
631 rcu_assign_pointer(new_device->name, name);
632 }
633 new_device->bdev = NULL;
634 new_device->writeable = 0;
635 new_device->in_fs_metadata = 0;
636 new_device->can_discard = 0;
637 list_replace_rcu(&device->dev_list, &new_device->dev_list);
638
639 call_rcu(&device->rcu, free_device);
640 }
641 mutex_unlock(&fs_devices->device_list_mutex);
642
643 WARN_ON(fs_devices->open_devices);
644 WARN_ON(fs_devices->rw_devices);
645 fs_devices->opened = 0;
646 fs_devices->seeding = 0;
647
648 return 0;
649 }
650
651 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
652 {
653 struct btrfs_fs_devices *seed_devices = NULL;
654 int ret;
655
656 mutex_lock(&uuid_mutex);
657 ret = __btrfs_close_devices(fs_devices);
658 if (!fs_devices->opened) {
659 seed_devices = fs_devices->seed;
660 fs_devices->seed = NULL;
661 }
662 mutex_unlock(&uuid_mutex);
663
664 while (seed_devices) {
665 fs_devices = seed_devices;
666 seed_devices = fs_devices->seed;
667 __btrfs_close_devices(fs_devices);
668 free_fs_devices(fs_devices);
669 }
670 return ret;
671 }
672
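/*
 * Open every device in @fs_devices that has a known path, validate its
 * super block against the in-memory devid/uuid, and remember the device
 * with the highest generation as the latest one.
 */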
673 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
674 fmode_t flags, void *holder)
675 {
676 struct request_queue *q;
677 struct block_device *bdev;
678 struct list_head *head = &fs_devices->devices;
679 struct btrfs_device *device;
680 struct block_device *latest_bdev = NULL;
681 struct buffer_head *bh;
682 struct btrfs_super_block *disk_super;
683 u64 latest_devid = 0;
684 u64 latest_transid = 0;
685 u64 devid;
686 int seeding = 1;
687 int ret = 0;
688
689 flags |= FMODE_EXCL;
690
691 list_for_each_entry(device, head, dev_list) {
692 if (device->bdev)
693 continue;
694 if (!device->name)
695 continue;
696
697 ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
698 &bdev, &bh);
699 if (ret)
700 continue;
701
702 disk_super = (struct btrfs_super_block *)bh->b_data;
703 devid = btrfs_stack_device_id(&disk_super->dev_item);
704 if (devid != device->devid)
705 goto error_brelse;
706
707 if (memcmp(device->uuid, disk_super->dev_item.uuid,
708 BTRFS_UUID_SIZE))
709 goto error_brelse;
710
711 device->generation = btrfs_super_generation(disk_super);
712 if (!latest_transid || device->generation > latest_transid) {
713 latest_devid = devid;
714 latest_transid = device->generation;
715 latest_bdev = bdev;
716 }
717
718 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
719 device->writeable = 0;
720 } else {
721 device->writeable = !bdev_read_only(bdev);
722 seeding = 0;
723 }
724
725 q = bdev_get_queue(bdev);
726 if (blk_queue_discard(q)) {
727 device->can_discard = 1;
728 fs_devices->num_can_discard++;
729 }
730
731 device->bdev = bdev;
732 device->in_fs_metadata = 0;
733 device->mode = flags;
734
735 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
736 fs_devices->rotating = 1;
737
738 fs_devices->open_devices++;
739 if (device->writeable && !device->is_tgtdev_for_dev_replace) {
740 fs_devices->rw_devices++;
741 list_add(&device->dev_alloc_list,
742 &fs_devices->alloc_list);
743 }
744 brelse(bh);
745 continue;
746
747 error_brelse:
748 brelse(bh);
749 blkdev_put(bdev, flags);
750 continue;
751 }
752 if (fs_devices->open_devices == 0) {
753 ret = -EINVAL;
754 goto out;
755 }
756 fs_devices->seeding = seeding;
757 fs_devices->opened = 1;
758 fs_devices->latest_bdev = latest_bdev;
759 fs_devices->latest_devid = latest_devid;
760 fs_devices->latest_trans = latest_transid;
761 fs_devices->total_rw_bytes = 0;
762 out:
763 return ret;
764 }
765
766 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
767 fmode_t flags, void *holder)
768 {
769 int ret;
770
771 mutex_lock(&uuid_mutex);
772 if (fs_devices->opened) {
773 fs_devices->opened++;
774 ret = 0;
775 } else {
776 ret = __btrfs_open_devices(fs_devices, flags, holder);
777 }
778 mutex_unlock(&uuid_mutex);
779 return ret;
780 }
781
782 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
783 struct btrfs_fs_devices **fs_devices_ret)
784 {
785 struct btrfs_super_block *disk_super;
786 struct block_device *bdev;
787 struct buffer_head *bh;
788 int ret;
789 u64 devid;
790 u64 transid;
791 u64 total_devices;
792
793 flags |= FMODE_EXCL;
794 mutex_lock(&uuid_mutex);
795 ret = btrfs_get_bdev_and_sb(path, flags, holder, 0, &bdev, &bh);
796 if (ret)
797 goto error;
798 disk_super = (struct btrfs_super_block *)bh->b_data;
799 devid = btrfs_stack_device_id(&disk_super->dev_item);
800 transid = btrfs_super_generation(disk_super);
801 total_devices = btrfs_super_num_devices(disk_super);
802 if (disk_super->label[0]) {
803 if (disk_super->label[BTRFS_LABEL_SIZE - 1])
804 disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
805 printk(KERN_INFO "device label %s ", disk_super->label);
806 } else {
807 printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
808 }
809 printk(KERN_CONT "devid %llu transid %llu %s\n",
810 (unsigned long long)devid, (unsigned long long)transid, path);
811 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
812 if (!ret && fs_devices_ret)
813 (*fs_devices_ret)->total_devices = total_devices;
814 brelse(bh);
815 blkdev_put(bdev, flags);
816 error:
817 mutex_unlock(&uuid_mutex);
818 return ret;
819 }
820
821 /* helper to account the used device space in the range */
822 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
823 u64 end, u64 *length)
824 {
825 struct btrfs_key key;
826 struct btrfs_root *root = device->dev_root;
827 struct btrfs_dev_extent *dev_extent;
828 struct btrfs_path *path;
829 u64 extent_end;
830 int ret;
831 int slot;
832 struct extent_buffer *l;
833
834 *length = 0;
835
836 if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
837 return 0;
838
839 path = btrfs_alloc_path();
840 if (!path)
841 return -ENOMEM;
842 path->reada = 2;
843
844 key.objectid = device->devid;
845 key.offset = start;
846 key.type = BTRFS_DEV_EXTENT_KEY;
847
848 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
849 if (ret < 0)
850 goto out;
851 if (ret > 0) {
852 ret = btrfs_previous_item(root, path, key.objectid, key.type);
853 if (ret < 0)
854 goto out;
855 }
856
857 while (1) {
858 l = path->nodes[0];
859 slot = path->slots[0];
860 if (slot >= btrfs_header_nritems(l)) {
861 ret = btrfs_next_leaf(root, path);
862 if (ret == 0)
863 continue;
864 if (ret < 0)
865 goto out;
866
867 break;
868 }
869 btrfs_item_key_to_cpu(l, &key, slot);
870
871 if (key.objectid < device->devid)
872 goto next;
873
874 if (key.objectid > device->devid)
875 break;
876
877 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
878 goto next;
879
880 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
881 extent_end = key.offset + btrfs_dev_extent_length(l,
882 dev_extent);
883 if (key.offset <= start && extent_end > end) {
884 *length = end - start + 1;
885 break;
886 } else if (key.offset <= start && extent_end > start)
887 *length += extent_end - start;
888 else if (key.offset > start && extent_end <= end)
889 *length += extent_end - key.offset;
890 else if (key.offset > start && key.offset <= end) {
891 *length += end - key.offset + 1;
892 break;
893 } else if (key.offset > end)
894 break;
895
896 next:
897 path->slots[0]++;
898 }
899 ret = 0;
900 out:
901 btrfs_free_path(path);
902 return ret;
903 }
904
905 /*
906 * find_free_dev_extent - find free space in the specified device
907  * @device: the device in which we search for the free space
908 * @num_bytes: the size of the free space that we need
909 * @start: store the start of the free space.
910  * @len: the size of the free space that we find, or the size of the max
911 * free space if we don't find suitable free space
912 *
913  * this uses a pretty simple search; the expectation is that it is
914 * called very infrequently and that a given device has a small number
915 * of extents
916 *
917  * @start is used to store the start of the free space if we find one. But if we
918 * don't find suitable free space, it will be used to store the start position
919 * of the max free space.
920 *
921 * @len is used to store the size of the free space that we find.
922 * But if we don't find suitable free space, it is used to store the size of
923 * the max free space.
924 */
925 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
926 u64 *start, u64 *len)
927 {
928 struct btrfs_key key;
929 struct btrfs_root *root = device->dev_root;
930 struct btrfs_dev_extent *dev_extent;
931 struct btrfs_path *path;
932 u64 hole_size;
933 u64 max_hole_start;
934 u64 max_hole_size;
935 u64 extent_end;
936 u64 search_start;
937 u64 search_end = device->total_bytes;
938 int ret;
939 int slot;
940 struct extent_buffer *l;
941
942 /* FIXME use last free of some kind */
943
944 /* we don't want to overwrite the superblock on the drive,
945 * so we make sure to start at an offset of at least 1MB
946 */
947 search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
948
949 max_hole_start = search_start;
950 max_hole_size = 0;
951 hole_size = 0;
952
953 if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
954 ret = -ENOSPC;
955 goto error;
956 }
957
958 path = btrfs_alloc_path();
959 if (!path) {
960 ret = -ENOMEM;
961 goto error;
962 }
963 path->reada = 2;
964
965 key.objectid = device->devid;
966 key.offset = search_start;
967 key.type = BTRFS_DEV_EXTENT_KEY;
968
969 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
970 if (ret < 0)
971 goto out;
972 if (ret > 0) {
973 ret = btrfs_previous_item(root, path, key.objectid, key.type);
974 if (ret < 0)
975 goto out;
976 }
977
978 while (1) {
979 l = path->nodes[0];
980 slot = path->slots[0];
981 if (slot >= btrfs_header_nritems(l)) {
982 ret = btrfs_next_leaf(root, path);
983 if (ret == 0)
984 continue;
985 if (ret < 0)
986 goto out;
987
988 break;
989 }
990 btrfs_item_key_to_cpu(l, &key, slot);
991
992 if (key.objectid < device->devid)
993 goto next;
994
995 if (key.objectid > device->devid)
996 break;
997
998 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
999 goto next;
1000
1001 if (key.offset > search_start) {
1002 hole_size = key.offset - search_start;
1003
1004 if (hole_size > max_hole_size) {
1005 max_hole_start = search_start;
1006 max_hole_size = hole_size;
1007 }
1008
1009 /*
1010 			 * If this free space is greater than what we need,
1011 * it must be the max free space that we have found
1012 * until now, so max_hole_start must point to the start
1013 * of this free space and the length of this free space
1014 * is stored in max_hole_size. Thus, we return
1015 * max_hole_start and max_hole_size and go back to the
1016 * caller.
1017 */
1018 if (hole_size >= num_bytes) {
1019 ret = 0;
1020 goto out;
1021 }
1022 }
1023
1024 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1025 extent_end = key.offset + btrfs_dev_extent_length(l,
1026 dev_extent);
1027 if (extent_end > search_start)
1028 search_start = extent_end;
1029 next:
1030 path->slots[0]++;
1031 cond_resched();
1032 }
1033
1034 /*
1035 * At this point, search_start should be the end of
1036 * allocated dev extents, and when shrinking the device,
1037 * search_end may be smaller than search_start.
1038 */
1039 if (search_end > search_start)
1040 hole_size = search_end - search_start;
1041
1042 if (hole_size > max_hole_size) {
1043 max_hole_start = search_start;
1044 max_hole_size = hole_size;
1045 }
1046
1047 /* See above. */
1048 if (hole_size < num_bytes)
1049 ret = -ENOSPC;
1050 else
1051 ret = 0;
1052
1053 out:
1054 btrfs_free_path(path);
1055 error:
1056 *start = max_hole_start;
1057 if (len)
1058 *len = max_hole_size;
1059 return ret;
1060 }
1061
1062 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1063 struct btrfs_device *device,
1064 u64 start)
1065 {
1066 int ret;
1067 struct btrfs_path *path;
1068 struct btrfs_root *root = device->dev_root;
1069 struct btrfs_key key;
1070 struct btrfs_key found_key;
1071 struct extent_buffer *leaf = NULL;
1072 struct btrfs_dev_extent *extent = NULL;
1073
1074 path = btrfs_alloc_path();
1075 if (!path)
1076 return -ENOMEM;
1077
1078 key.objectid = device->devid;
1079 key.offset = start;
1080 key.type = BTRFS_DEV_EXTENT_KEY;
1081 again:
1082 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1083 if (ret > 0) {
1084 ret = btrfs_previous_item(root, path, key.objectid,
1085 BTRFS_DEV_EXTENT_KEY);
1086 if (ret)
1087 goto out;
1088 leaf = path->nodes[0];
1089 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1090 extent = btrfs_item_ptr(leaf, path->slots[0],
1091 struct btrfs_dev_extent);
1092 BUG_ON(found_key.offset > start || found_key.offset +
1093 btrfs_dev_extent_length(leaf, extent) < start);
1094 key = found_key;
1095 btrfs_release_path(path);
1096 goto again;
1097 } else if (ret == 0) {
1098 leaf = path->nodes[0];
1099 extent = btrfs_item_ptr(leaf, path->slots[0],
1100 struct btrfs_dev_extent);
1101 } else {
1102 btrfs_error(root->fs_info, ret, "Slot search failed");
1103 goto out;
1104 }
1105
1106 if (device->bytes_used > 0) {
1107 u64 len = btrfs_dev_extent_length(leaf, extent);
1108 device->bytes_used -= len;
1109 spin_lock(&root->fs_info->free_chunk_lock);
1110 root->fs_info->free_chunk_space += len;
1111 spin_unlock(&root->fs_info->free_chunk_lock);
1112 }
1113 ret = btrfs_del_item(trans, root, path);
1114 if (ret) {
1115 btrfs_error(root->fs_info, ret,
1116 "Failed to remove dev extent item");
1117 }
1118 out:
1119 btrfs_free_path(path);
1120 return ret;
1121 }
1122
1123 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1124 struct btrfs_device *device,
1125 u64 chunk_tree, u64 chunk_objectid,
1126 u64 chunk_offset, u64 start, u64 num_bytes)
1127 {
1128 int ret;
1129 struct btrfs_path *path;
1130 struct btrfs_root *root = device->dev_root;
1131 struct btrfs_dev_extent *extent;
1132 struct extent_buffer *leaf;
1133 struct btrfs_key key;
1134
1135 WARN_ON(!device->in_fs_metadata);
1136 WARN_ON(device->is_tgtdev_for_dev_replace);
1137 path = btrfs_alloc_path();
1138 if (!path)
1139 return -ENOMEM;
1140
1141 key.objectid = device->devid;
1142 key.offset = start;
1143 key.type = BTRFS_DEV_EXTENT_KEY;
1144 ret = btrfs_insert_empty_item(trans, root, path, &key,
1145 sizeof(*extent));
1146 if (ret)
1147 goto out;
1148
1149 leaf = path->nodes[0];
1150 extent = btrfs_item_ptr(leaf, path->slots[0],
1151 struct btrfs_dev_extent);
1152 btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1153 btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1154 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1155
1156 write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1157 (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
1158 BTRFS_UUID_SIZE);
1159
1160 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1161 btrfs_mark_buffer_dirty(leaf);
1162 out:
1163 btrfs_free_path(path);
1164 return ret;
1165 }
1166
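/*
 * Find the offset at which the next chunk item for @objectid can be
 * placed: just past the end of the last existing chunk, or 0 when no
 * chunk exists yet.
 */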
1167 static noinline int find_next_chunk(struct btrfs_root *root,
1168 u64 objectid, u64 *offset)
1169 {
1170 struct btrfs_path *path;
1171 int ret;
1172 struct btrfs_key key;
1173 struct btrfs_chunk *chunk;
1174 struct btrfs_key found_key;
1175
1176 path = btrfs_alloc_path();
1177 if (!path)
1178 return -ENOMEM;
1179
1180 key.objectid = objectid;
1181 key.offset = (u64)-1;
1182 key.type = BTRFS_CHUNK_ITEM_KEY;
1183
1184 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1185 if (ret < 0)
1186 goto error;
1187
1188 BUG_ON(ret == 0); /* Corruption */
1189
1190 ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
1191 if (ret) {
1192 *offset = 0;
1193 } else {
1194 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1195 path->slots[0]);
1196 if (found_key.objectid != objectid)
1197 *offset = 0;
1198 else {
1199 chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
1200 struct btrfs_chunk);
1201 *offset = found_key.offset +
1202 btrfs_chunk_length(path->nodes[0], chunk);
1203 }
1204 }
1205 ret = 0;
1206 error:
1207 btrfs_free_path(path);
1208 return ret;
1209 }
1210
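/*
 * Find the next unused devid by looking at the last dev item in the
 * chunk tree; the first device of a filesystem gets devid 1.
 */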
1211 static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
1212 {
1213 int ret;
1214 struct btrfs_key key;
1215 struct btrfs_key found_key;
1216 struct btrfs_path *path;
1217
1218 root = root->fs_info->chunk_root;
1219
1220 path = btrfs_alloc_path();
1221 if (!path)
1222 return -ENOMEM;
1223
1224 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1225 key.type = BTRFS_DEV_ITEM_KEY;
1226 key.offset = (u64)-1;
1227
1228 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1229 if (ret < 0)
1230 goto error;
1231
1232 BUG_ON(ret == 0); /* Corruption */
1233
1234 ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
1235 BTRFS_DEV_ITEM_KEY);
1236 if (ret) {
1237 *objectid = 1;
1238 } else {
1239 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1240 path->slots[0]);
1241 *objectid = found_key.offset + 1;
1242 }
1243 ret = 0;
1244 error:
1245 btrfs_free_path(path);
1246 return ret;
1247 }
1248
1249 /*
1250 * the device information is stored in the chunk root
1251 * the btrfs_device struct should be fully filled in
1252 */
1253 int btrfs_add_device(struct btrfs_trans_handle *trans,
1254 struct btrfs_root *root,
1255 struct btrfs_device *device)
1256 {
1257 int ret;
1258 struct btrfs_path *path;
1259 struct btrfs_dev_item *dev_item;
1260 struct extent_buffer *leaf;
1261 struct btrfs_key key;
1262 unsigned long ptr;
1263
1264 root = root->fs_info->chunk_root;
1265
1266 path = btrfs_alloc_path();
1267 if (!path)
1268 return -ENOMEM;
1269
1270 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1271 key.type = BTRFS_DEV_ITEM_KEY;
1272 key.offset = device->devid;
1273
1274 ret = btrfs_insert_empty_item(trans, root, path, &key,
1275 sizeof(*dev_item));
1276 if (ret)
1277 goto out;
1278
1279 leaf = path->nodes[0];
1280 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1281
1282 btrfs_set_device_id(leaf, dev_item, device->devid);
1283 btrfs_set_device_generation(leaf, dev_item, 0);
1284 btrfs_set_device_type(leaf, dev_item, device->type);
1285 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1286 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1287 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1288 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1289 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1290 btrfs_set_device_group(leaf, dev_item, 0);
1291 btrfs_set_device_seek_speed(leaf, dev_item, 0);
1292 btrfs_set_device_bandwidth(leaf, dev_item, 0);
1293 btrfs_set_device_start_offset(leaf, dev_item, 0);
1294
1295 ptr = (unsigned long)btrfs_device_uuid(dev_item);
1296 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1297 ptr = (unsigned long)btrfs_device_fsid(dev_item);
1298 write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1299 btrfs_mark_buffer_dirty(leaf);
1300
1301 ret = 0;
1302 out:
1303 btrfs_free_path(path);
1304 return ret;
1305 }
1306
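/*
 * Delete the dev item for @device from the chunk tree in its own
 * transaction.  Called from btrfs_rm_device() after the device has been
 * shrunk to zero.
 */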
1307 static int btrfs_rm_dev_item(struct btrfs_root *root,
1308 struct btrfs_device *device)
1309 {
1310 int ret;
1311 struct btrfs_path *path;
1312 struct btrfs_key key;
1313 struct btrfs_trans_handle *trans;
1314
1315 root = root->fs_info->chunk_root;
1316
1317 path = btrfs_alloc_path();
1318 if (!path)
1319 return -ENOMEM;
1320
1321 trans = btrfs_start_transaction(root, 0);
1322 if (IS_ERR(trans)) {
1323 btrfs_free_path(path);
1324 return PTR_ERR(trans);
1325 }
1326 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1327 key.type = BTRFS_DEV_ITEM_KEY;
1328 key.offset = device->devid;
1329 lock_chunks(root);
1330
1331 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1332 if (ret < 0)
1333 goto out;
1334
1335 if (ret > 0) {
1336 ret = -ENOENT;
1337 goto out;
1338 }
1339
1340 ret = btrfs_del_item(trans, root, path);
1341 if (ret)
1342 goto out;
1343 out:
1344 btrfs_free_path(path);
1345 unlock_chunks(root);
1346 btrfs_commit_transaction(trans, root);
1347 return ret;
1348 }
1349
1350 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1351 {
1352 struct btrfs_device *device;
1353 struct btrfs_device *next_device;
1354 struct block_device *bdev;
1355 struct buffer_head *bh = NULL;
1356 struct btrfs_super_block *disk_super;
1357 struct btrfs_fs_devices *cur_devices;
1358 u64 all_avail;
1359 u64 devid;
1360 u64 num_devices;
1361 u8 *dev_uuid;
1362 int ret = 0;
1363 bool clear_super = false;
1364
1365 mutex_lock(&uuid_mutex);
1366
1367 all_avail = root->fs_info->avail_data_alloc_bits |
1368 root->fs_info->avail_system_alloc_bits |
1369 root->fs_info->avail_metadata_alloc_bits;
1370
1371 num_devices = root->fs_info->fs_devices->num_devices;
1372 btrfs_dev_replace_lock(&root->fs_info->dev_replace);
1373 if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
1374 WARN_ON(num_devices < 1);
1375 num_devices--;
1376 }
1377 btrfs_dev_replace_unlock(&root->fs_info->dev_replace);
1378
1379 if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
1380 printk(KERN_ERR "btrfs: unable to go below four devices "
1381 "on raid10\n");
1382 ret = -EINVAL;
1383 goto out;
1384 }
1385
1386 if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
1387 printk(KERN_ERR "btrfs: unable to go below two "
1388 "devices on raid1\n");
1389 ret = -EINVAL;
1390 goto out;
1391 }
1392
1393 if (strcmp(device_path, "missing") == 0) {
1394 struct list_head *devices;
1395 struct btrfs_device *tmp;
1396
1397 device = NULL;
1398 devices = &root->fs_info->fs_devices->devices;
1399 /*
1400 * It is safe to read the devices since the volume_mutex
1401 * is held.
1402 */
1403 list_for_each_entry(tmp, devices, dev_list) {
1404 if (tmp->in_fs_metadata &&
1405 !tmp->is_tgtdev_for_dev_replace &&
1406 !tmp->bdev) {
1407 device = tmp;
1408 break;
1409 }
1410 }
1411 bdev = NULL;
1412 bh = NULL;
1413 disk_super = NULL;
1414 if (!device) {
1415 printk(KERN_ERR "btrfs: no missing devices found to "
1416 "remove\n");
1417 goto out;
1418 }
1419 } else {
1420 ret = btrfs_get_bdev_and_sb(device_path,
1421 FMODE_READ | FMODE_EXCL,
1422 root->fs_info->bdev_holder, 0,
1423 &bdev, &bh);
1424 if (ret)
1425 goto out;
1426 disk_super = (struct btrfs_super_block *)bh->b_data;
1427 devid = btrfs_stack_device_id(&disk_super->dev_item);
1428 dev_uuid = disk_super->dev_item.uuid;
1429 device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1430 disk_super->fsid);
1431 if (!device) {
1432 ret = -ENOENT;
1433 goto error_brelse;
1434 }
1435 }
1436
1437 if (device->is_tgtdev_for_dev_replace) {
1438 pr_err("btrfs: unable to remove the dev_replace target dev\n");
1439 ret = -EINVAL;
1440 goto error_brelse;
1441 }
1442
1443 if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1444 printk(KERN_ERR "btrfs: unable to remove the only writeable "
1445 "device\n");
1446 ret = -EINVAL;
1447 goto error_brelse;
1448 }
1449
1450 if (device->writeable) {
1451 lock_chunks(root);
1452 list_del_init(&device->dev_alloc_list);
1453 unlock_chunks(root);
1454 root->fs_info->fs_devices->rw_devices--;
1455 clear_super = true;
1456 }
1457
1458 ret = btrfs_shrink_device(device, 0);
1459 if (ret)
1460 goto error_undo;
1461
1462 /*
1463 * TODO: the superblock still includes this device in its num_devices
1464 * counter although write_all_supers() is not locked out. This
1465 * could give a filesystem state which requires a degraded mount.
1466 */
1467 ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1468 if (ret)
1469 goto error_undo;
1470
1471 spin_lock(&root->fs_info->free_chunk_lock);
1472 root->fs_info->free_chunk_space = device->total_bytes -
1473 device->bytes_used;
1474 spin_unlock(&root->fs_info->free_chunk_lock);
1475
1476 device->in_fs_metadata = 0;
1477 btrfs_scrub_cancel_dev(root->fs_info, device);
1478
1479 /*
1480 * the device list mutex makes sure that we don't change
1481 * the device list while someone else is writing out all
1482 * the device supers.
1483 */
1484
1485 cur_devices = device->fs_devices;
1486 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1487 list_del_rcu(&device->dev_list);
1488
1489 device->fs_devices->num_devices--;
1490 device->fs_devices->total_devices--;
1491
1492 if (device->missing)
1493 root->fs_info->fs_devices->missing_devices--;
1494
1495 next_device = list_entry(root->fs_info->fs_devices->devices.next,
1496 struct btrfs_device, dev_list);
1497 if (device->bdev == root->fs_info->sb->s_bdev)
1498 root->fs_info->sb->s_bdev = next_device->bdev;
1499 if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1500 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1501
1502 if (device->bdev)
1503 device->fs_devices->open_devices--;
1504
1505 call_rcu(&device->rcu, free_device);
1506 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1507
1508 num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1509 btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1510
1511 if (cur_devices->open_devices == 0) {
1512 struct btrfs_fs_devices *fs_devices;
1513 fs_devices = root->fs_info->fs_devices;
1514 while (fs_devices) {
1515 if (fs_devices->seed == cur_devices)
1516 break;
1517 fs_devices = fs_devices->seed;
1518 }
1519 fs_devices->seed = cur_devices->seed;
1520 cur_devices->seed = NULL;
1521 lock_chunks(root);
1522 __btrfs_close_devices(cur_devices);
1523 unlock_chunks(root);
1524 free_fs_devices(cur_devices);
1525 }
1526
1527 root->fs_info->num_tolerated_disk_barrier_failures =
1528 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1529
1530 /*
1531 * at this point, the device is zero sized. We want to
1532 * remove it from the devices list and zero out the old super
1533 */
1534 if (clear_super && disk_super) {
1535 /* make sure this device isn't detected as part of
1536 * the FS anymore
1537 */
1538 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1539 set_buffer_dirty(bh);
1540 sync_dirty_buffer(bh);
1541 }
1542
1543 ret = 0;
1544
1545 error_brelse:
1546 brelse(bh);
1547 if (bdev)
1548 blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1549 out:
1550 mutex_unlock(&uuid_mutex);
1551 return ret;
1552 error_undo:
1553 if (device->writeable) {
1554 lock_chunks(root);
1555 list_add(&device->dev_alloc_list,
1556 &root->fs_info->fs_devices->alloc_list);
1557 unlock_chunks(root);
1558 root->fs_info->fs_devices->rw_devices++;
1559 }
1560 goto error_brelse;
1561 }
1562
1563 void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
1564 struct btrfs_device *srcdev)
1565 {
1566 WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1567 list_del_rcu(&srcdev->dev_list);
1568 list_del_rcu(&srcdev->dev_alloc_list);
1569 fs_info->fs_devices->num_devices--;
1570 if (srcdev->missing) {
1571 fs_info->fs_devices->missing_devices--;
1572 fs_info->fs_devices->rw_devices++;
1573 }
1574 if (srcdev->can_discard)
1575 fs_info->fs_devices->num_can_discard--;
1576 if (srcdev->bdev)
1577 fs_info->fs_devices->open_devices--;
1578
1579 call_rcu(&srcdev->rcu, free_device);
1580 }
1581
1582 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
1583 struct btrfs_device *tgtdev)
1584 {
1585 struct btrfs_device *next_device;
1586
1587 WARN_ON(!tgtdev);
1588 mutex_lock(&fs_info->fs_devices->device_list_mutex);
1589 if (tgtdev->bdev) {
1590 btrfs_scratch_superblock(tgtdev);
1591 fs_info->fs_devices->open_devices--;
1592 }
1593 fs_info->fs_devices->num_devices--;
1594 if (tgtdev->can_discard)
1595 fs_info->fs_devices->num_can_discard++;
1596
1597 next_device = list_entry(fs_info->fs_devices->devices.next,
1598 struct btrfs_device, dev_list);
1599 if (tgtdev->bdev == fs_info->sb->s_bdev)
1600 fs_info->sb->s_bdev = next_device->bdev;
1601 if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
1602 fs_info->fs_devices->latest_bdev = next_device->bdev;
1603 list_del_rcu(&tgtdev->dev_list);
1604
1605 call_rcu(&tgtdev->rcu, free_device);
1606
1607 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1608 }
1609
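/*
 * Read the super block from @device_path and look the device up in the
 * in-memory lists by devid, uuid and fsid.  Returns -ENOENT if it is
 * not part of this filesystem.
 */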
1610 int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
1611 struct btrfs_device **device)
1612 {
1613 int ret = 0;
1614 struct btrfs_super_block *disk_super;
1615 u64 devid;
1616 u8 *dev_uuid;
1617 struct block_device *bdev;
1618 struct buffer_head *bh;
1619
1620 *device = NULL;
1621 ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
1622 root->fs_info->bdev_holder, 0, &bdev, &bh);
1623 if (ret)
1624 return ret;
1625 disk_super = (struct btrfs_super_block *)bh->b_data;
1626 devid = btrfs_stack_device_id(&disk_super->dev_item);
1627 dev_uuid = disk_super->dev_item.uuid;
1628 *device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1629 disk_super->fsid);
1630 brelse(bh);
1631 if (!*device)
1632 ret = -ENOENT;
1633 blkdev_put(bdev, FMODE_READ);
1634 return ret;
1635 }
1636
1637 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
1638 char *device_path,
1639 struct btrfs_device **device)
1640 {
1641 *device = NULL;
1642 if (strcmp(device_path, "missing") == 0) {
1643 struct list_head *devices;
1644 struct btrfs_device *tmp;
1645
1646 devices = &root->fs_info->fs_devices->devices;
1647 /*
1648 * It is safe to read the devices since the volume_mutex
1649 * is held by the caller.
1650 */
1651 list_for_each_entry(tmp, devices, dev_list) {
1652 if (tmp->in_fs_metadata && !tmp->bdev) {
1653 *device = tmp;
1654 break;
1655 }
1656 }
1657
1658 if (!*device) {
1659 pr_err("btrfs: no missing device found\n");
1660 return -ENOENT;
1661 }
1662
1663 return 0;
1664 } else {
1665 return btrfs_find_device_by_path(root, device_path, device);
1666 }
1667 }
1668
1669 /*
1670 * does all the dirty work required for changing file system's UUID.
1671 */
1672 static int btrfs_prepare_sprout(struct btrfs_root *root)
1673 {
1674 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1675 struct btrfs_fs_devices *old_devices;
1676 struct btrfs_fs_devices *seed_devices;
1677 struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1678 struct btrfs_device *device;
1679 u64 super_flags;
1680
1681 BUG_ON(!mutex_is_locked(&uuid_mutex));
1682 if (!fs_devices->seeding)
1683 return -EINVAL;
1684
1685 seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1686 if (!seed_devices)
1687 return -ENOMEM;
1688
1689 old_devices = clone_fs_devices(fs_devices);
1690 if (IS_ERR(old_devices)) {
1691 kfree(seed_devices);
1692 return PTR_ERR(old_devices);
1693 }
1694
1695 list_add(&old_devices->list, &fs_uuids);
1696
1697 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1698 seed_devices->opened = 1;
1699 INIT_LIST_HEAD(&seed_devices->devices);
1700 INIT_LIST_HEAD(&seed_devices->alloc_list);
1701 mutex_init(&seed_devices->device_list_mutex);
1702
1703 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1704 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1705 synchronize_rcu);
1706 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1707
1708 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1709 list_for_each_entry(device, &seed_devices->devices, dev_list) {
1710 device->fs_devices = seed_devices;
1711 }
1712
1713 fs_devices->seeding = 0;
1714 fs_devices->num_devices = 0;
1715 fs_devices->open_devices = 0;
1716 fs_devices->total_devices = 0;
1717 fs_devices->seed = seed_devices;
1718
1719 generate_random_uuid(fs_devices->fsid);
1720 memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1721 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1722 super_flags = btrfs_super_flags(disk_super) &
1723 ~BTRFS_SUPER_FLAG_SEEDING;
1724 btrfs_set_super_flags(disk_super, super_flags);
1725
1726 return 0;
1727 }
1728
1729 /*
1730  * store the expected generation for seed devices in device items.
1731 */
1732 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1733 struct btrfs_root *root)
1734 {
1735 struct btrfs_path *path;
1736 struct extent_buffer *leaf;
1737 struct btrfs_dev_item *dev_item;
1738 struct btrfs_device *device;
1739 struct btrfs_key key;
1740 u8 fs_uuid[BTRFS_UUID_SIZE];
1741 u8 dev_uuid[BTRFS_UUID_SIZE];
1742 u64 devid;
1743 int ret;
1744
1745 path = btrfs_alloc_path();
1746 if (!path)
1747 return -ENOMEM;
1748
1749 root = root->fs_info->chunk_root;
1750 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1751 key.offset = 0;
1752 key.type = BTRFS_DEV_ITEM_KEY;
1753
1754 while (1) {
1755 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1756 if (ret < 0)
1757 goto error;
1758
1759 leaf = path->nodes[0];
1760 next_slot:
1761 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1762 ret = btrfs_next_leaf(root, path);
1763 if (ret > 0)
1764 break;
1765 if (ret < 0)
1766 goto error;
1767 leaf = path->nodes[0];
1768 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1769 btrfs_release_path(path);
1770 continue;
1771 }
1772
1773 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1774 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1775 key.type != BTRFS_DEV_ITEM_KEY)
1776 break;
1777
1778 dev_item = btrfs_item_ptr(leaf, path->slots[0],
1779 struct btrfs_dev_item);
1780 devid = btrfs_device_id(leaf, dev_item);
1781 read_extent_buffer(leaf, dev_uuid,
1782 (unsigned long)btrfs_device_uuid(dev_item),
1783 BTRFS_UUID_SIZE);
1784 read_extent_buffer(leaf, fs_uuid,
1785 (unsigned long)btrfs_device_fsid(dev_item),
1786 BTRFS_UUID_SIZE);
1787 device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1788 fs_uuid);
1789 BUG_ON(!device); /* Logic error */
1790
1791 if (device->fs_devices->seeding) {
1792 btrfs_set_device_generation(leaf, dev_item,
1793 device->generation);
1794 btrfs_mark_buffer_dirty(leaf);
1795 }
1796
1797 path->slots[0]++;
1798 goto next_slot;
1799 }
1800 ret = 0;
1801 error:
1802 btrfs_free_path(path);
1803 return ret;
1804 }
1805
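/*
 * Add the device at @device_path to a mounted filesystem.  If the
 * filesystem is a seed, this sprouts a new writable filesystem on top
 * of it and relocates the system chunks afterwards.
 */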
1806 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1807 {
1808 struct request_queue *q;
1809 struct btrfs_trans_handle *trans;
1810 struct btrfs_device *device;
1811 struct block_device *bdev;
1812 struct list_head *devices;
1813 struct super_block *sb = root->fs_info->sb;
1814 struct rcu_string *name;
1815 u64 total_bytes;
1816 int seeding_dev = 0;
1817 int ret = 0;
1818
1819 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1820 return -EROFS;
1821
1822 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1823 root->fs_info->bdev_holder);
1824 if (IS_ERR(bdev))
1825 return PTR_ERR(bdev);
1826
1827 if (root->fs_info->fs_devices->seeding) {
1828 seeding_dev = 1;
1829 down_write(&sb->s_umount);
1830 mutex_lock(&uuid_mutex);
1831 }
1832
1833 filemap_write_and_wait(bdev->bd_inode->i_mapping);
1834
1835 devices = &root->fs_info->fs_devices->devices;
1836
1837 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1838 list_for_each_entry(device, devices, dev_list) {
1839 if (device->bdev == bdev) {
1840 ret = -EEXIST;
1841 mutex_unlock(
1842 &root->fs_info->fs_devices->device_list_mutex);
1843 goto error;
1844 }
1845 }
1846 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1847
1848 device = kzalloc(sizeof(*device), GFP_NOFS);
1849 if (!device) {
1850 /* we can safely leave the fs_devices entry around */
1851 ret = -ENOMEM;
1852 goto error;
1853 }
1854
1855 name = rcu_string_strdup(device_path, GFP_NOFS);
1856 if (!name) {
1857 kfree(device);
1858 ret = -ENOMEM;
1859 goto error;
1860 }
1861 rcu_assign_pointer(device->name, name);
1862
1863 ret = find_next_devid(root, &device->devid);
1864 if (ret) {
1865 rcu_string_free(device->name);
1866 kfree(device);
1867 goto error;
1868 }
1869
1870 trans = btrfs_start_transaction(root, 0);
1871 if (IS_ERR(trans)) {
1872 rcu_string_free(device->name);
1873 kfree(device);
1874 ret = PTR_ERR(trans);
1875 goto error;
1876 }
1877
1878 lock_chunks(root);
1879
1880 q = bdev_get_queue(bdev);
1881 if (blk_queue_discard(q))
1882 device->can_discard = 1;
1883 device->writeable = 1;
1884 device->work.func = pending_bios_fn;
1885 generate_random_uuid(device->uuid);
1886 spin_lock_init(&device->io_lock);
1887 device->generation = trans->transid;
1888 device->io_width = root->sectorsize;
1889 device->io_align = root->sectorsize;
1890 device->sector_size = root->sectorsize;
1891 device->total_bytes = i_size_read(bdev->bd_inode);
1892 device->disk_total_bytes = device->total_bytes;
1893 device->dev_root = root->fs_info->dev_root;
1894 device->bdev = bdev;
1895 device->in_fs_metadata = 1;
1896 device->is_tgtdev_for_dev_replace = 0;
1897 device->mode = FMODE_EXCL;
1898 set_blocksize(device->bdev, 4096);
1899
1900 if (seeding_dev) {
1901 sb->s_flags &= ~MS_RDONLY;
1902 ret = btrfs_prepare_sprout(root);
1903 BUG_ON(ret); /* -ENOMEM */
1904 }
1905
1906 device->fs_devices = root->fs_info->fs_devices;
1907
1908 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1909 list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
1910 list_add(&device->dev_alloc_list,
1911 &root->fs_info->fs_devices->alloc_list);
1912 root->fs_info->fs_devices->num_devices++;
1913 root->fs_info->fs_devices->open_devices++;
1914 root->fs_info->fs_devices->rw_devices++;
1915 root->fs_info->fs_devices->total_devices++;
1916 if (device->can_discard)
1917 root->fs_info->fs_devices->num_can_discard++;
1918 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
1919
1920 spin_lock(&root->fs_info->free_chunk_lock);
1921 root->fs_info->free_chunk_space += device->total_bytes;
1922 spin_unlock(&root->fs_info->free_chunk_lock);
1923
1924 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
1925 root->fs_info->fs_devices->rotating = 1;
1926
1927 total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
1928 btrfs_set_super_total_bytes(root->fs_info->super_copy,
1929 total_bytes + device->total_bytes);
1930
1931 total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
1932 btrfs_set_super_num_devices(root->fs_info->super_copy,
1933 total_bytes + 1);
1934 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1935
1936 if (seeding_dev) {
1937 ret = init_first_rw_device(trans, root, device);
1938 if (ret) {
1939 btrfs_abort_transaction(trans, root, ret);
1940 goto error_trans;
1941 }
1942 ret = btrfs_finish_sprout(trans, root);
1943 if (ret) {
1944 btrfs_abort_transaction(trans, root, ret);
1945 goto error_trans;
1946 }
1947 } else {
1948 ret = btrfs_add_device(trans, root, device);
1949 if (ret) {
1950 btrfs_abort_transaction(trans, root, ret);
1951 goto error_trans;
1952 }
1953 }
1954
1955 /*
1956 * we've got more storage, clear any full flags on the space
1957 * infos
1958 */
1959 btrfs_clear_space_info_full(root->fs_info);
1960
1961 unlock_chunks(root);
1962 root->fs_info->num_tolerated_disk_barrier_failures =
1963 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1964 ret = btrfs_commit_transaction(trans, root);
1965
1966 if (seeding_dev) {
1967 mutex_unlock(&uuid_mutex);
1968 up_write(&sb->s_umount);
1969
1970 if (ret) /* transaction commit */
1971 return ret;
1972
1973 ret = btrfs_relocate_sys_chunks(root);
1974 if (ret < 0)
1975 btrfs_error(root->fs_info, ret,
1976 "Failed to relocate sys chunks after "
1977 "device initialization. This can be fixed "
1978 "using the \"btrfs balance\" command.");
1979 trans = btrfs_attach_transaction(root);
1980 if (IS_ERR(trans)) {
1981 if (PTR_ERR(trans) == -ENOENT)
1982 return 0;
1983 return PTR_ERR(trans);
1984 }
1985 ret = btrfs_commit_transaction(trans, root);
1986 }
1987
1988 return ret;
1989
1990 error_trans:
1991 unlock_chunks(root);
1992 btrfs_end_transaction(trans, root);
1993 rcu_string_free(device->name);
1994 kfree(device);
1995 error:
1996 blkdev_put(bdev, FMODE_EXCL);
1997 if (seeding_dev) {
1998 mutex_unlock(&uuid_mutex);
1999 up_write(&sb->s_umount);
2000 }
2001 return ret;
2002 }
2003
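/*
 * Open and initialize the target device for a device-replace operation.
 * The block device is claimed exclusively, a new btrfs_device with
 * devid BTRFS_DEV_REPLACE_DEVID is set up and linked into fs_devices,
 * and a pointer to it is returned through @device_out.  Nothing is
 * written to the chunk tree here.
 */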
2004 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2005 struct btrfs_device **device_out)
2006 {
2007 struct request_queue *q;
2008 struct btrfs_device *device;
2009 struct block_device *bdev;
2010 struct btrfs_fs_info *fs_info = root->fs_info;
2011 struct list_head *devices;
2012 struct rcu_string *name;
2013 int ret = 0;
2014
2015 *device_out = NULL;
2016 if (fs_info->fs_devices->seeding)
2017 return -EINVAL;
2018
2019 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2020 fs_info->bdev_holder);
2021 if (IS_ERR(bdev))
2022 return PTR_ERR(bdev);
2023
2024 filemap_write_and_wait(bdev->bd_inode->i_mapping);
2025
2026 devices = &fs_info->fs_devices->devices;
2027 list_for_each_entry(device, devices, dev_list) {
2028 if (device->bdev == bdev) {
2029 ret = -EEXIST;
2030 goto error;
2031 }
2032 }
2033
2034 device = kzalloc(sizeof(*device), GFP_NOFS);
2035 if (!device) {
2036 ret = -ENOMEM;
2037 goto error;
2038 }
2039
2040 name = rcu_string_strdup(device_path, GFP_NOFS);
2041 if (!name) {
2042 kfree(device);
2043 ret = -ENOMEM;
2044 goto error;
2045 }
2046 rcu_assign_pointer(device->name, name);
2047
2048 q = bdev_get_queue(bdev);
2049 if (blk_queue_discard(q))
2050 device->can_discard = 1;
2051 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2052 device->writeable = 1;
2053 device->work.func = pending_bios_fn;
2054 generate_random_uuid(device->uuid);
2055 device->devid = BTRFS_DEV_REPLACE_DEVID;
2056 spin_lock_init(&device->io_lock);
2057 device->generation = 0;
2058 device->io_width = root->sectorsize;
2059 device->io_align = root->sectorsize;
2060 device->sector_size = root->sectorsize;
2061 device->total_bytes = i_size_read(bdev->bd_inode);
2062 device->disk_total_bytes = device->total_bytes;
2063 device->dev_root = fs_info->dev_root;
2064 device->bdev = bdev;
2065 device->in_fs_metadata = 1;
2066 device->is_tgtdev_for_dev_replace = 1;
2067 device->mode = FMODE_EXCL;
2068 set_blocksize(device->bdev, 4096);
2069 device->fs_devices = fs_info->fs_devices;
2070 list_add(&device->dev_list, &fs_info->fs_devices->devices);
2071 fs_info->fs_devices->num_devices++;
2072 fs_info->fs_devices->open_devices++;
2073 if (device->can_discard)
2074 fs_info->fs_devices->num_can_discard++;
2075 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2076
2077 *device_out = device;
2078 return ret;
2079
2080 error:
2081 blkdev_put(bdev, FMODE_EXCL);
2082 return ret;
2083 }
2084
2085 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2086 struct btrfs_device *tgtdev)
2087 {
2088 WARN_ON(fs_info->fs_devices->rw_devices == 0);
2089 tgtdev->io_width = fs_info->dev_root->sectorsize;
2090 tgtdev->io_align = fs_info->dev_root->sectorsize;
2091 tgtdev->sector_size = fs_info->dev_root->sectorsize;
2092 tgtdev->dev_root = fs_info->dev_root;
2093 tgtdev->in_fs_metadata = 1;
2094 }
2095
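/*
 * Write the in-memory state of @device (sizes, alignment, type) back to
 * its DEV_ITEM in the chunk tree.
 */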
2096 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2097 struct btrfs_device *device)
2098 {
2099 int ret;
2100 struct btrfs_path *path;
2101 struct btrfs_root *root;
2102 struct btrfs_dev_item *dev_item;
2103 struct extent_buffer *leaf;
2104 struct btrfs_key key;
2105
2106 root = device->dev_root->fs_info->chunk_root;
2107
2108 path = btrfs_alloc_path();
2109 if (!path)
2110 return -ENOMEM;
2111
2112 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2113 key.type = BTRFS_DEV_ITEM_KEY;
2114 key.offset = device->devid;
2115
2116 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2117 if (ret < 0)
2118 goto out;
2119
2120 if (ret > 0) {
2121 ret = -ENOENT;
2122 goto out;
2123 }
2124
2125 leaf = path->nodes[0];
2126 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2127
2128 btrfs_set_device_id(leaf, dev_item, device->devid);
2129 btrfs_set_device_type(leaf, dev_item, device->type);
2130 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2131 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2132 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2133 btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
2134 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
2135 btrfs_mark_buffer_dirty(leaf);
2136
2137 out:
2138 btrfs_free_path(path);
2139 return ret;
2140 }
2141
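/*
 * Grow @device to @new_size: bump the superblock total_bytes and the
 * fs_devices rw byte count by the difference, update the in-memory
 * sizes and write the new size to the dev item.  Callers go through
 * btrfs_grow_device() below, which takes the chunk mutex.
 */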
2142 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
2143 struct btrfs_device *device, u64 new_size)
2144 {
2145 struct btrfs_super_block *super_copy =
2146 device->dev_root->fs_info->super_copy;
2147 u64 old_total = btrfs_super_total_bytes(super_copy);
2148 u64 diff = new_size - device->total_bytes;
2149
2150 if (!device->writeable)
2151 return -EACCES;
2152 if (new_size <= device->total_bytes ||
2153 device->is_tgtdev_for_dev_replace)
2154 return -EINVAL;
2155
2156 btrfs_set_super_total_bytes(super_copy, old_total + diff);
2157 device->fs_devices->total_rw_bytes += diff;
2158
2159 device->total_bytes = new_size;
2160 device->disk_total_bytes = new_size;
2161 btrfs_clear_space_info_full(device->dev_root->fs_info);
2162
2163 return btrfs_update_device(trans, device);
2164 }
2165
2166 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2167 struct btrfs_device *device, u64 new_size)
2168 {
2169 int ret;
2170 lock_chunks(device->dev_root);
2171 ret = __btrfs_grow_device(trans, device, new_size);
2172 unlock_chunks(device->dev_root);
2173 return ret;
2174 }
2175
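/* Delete the chunk item for @chunk_offset from the chunk tree. */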
2176 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2177 struct btrfs_root *root,
2178 u64 chunk_tree, u64 chunk_objectid,
2179 u64 chunk_offset)
2180 {
2181 int ret;
2182 struct btrfs_path *path;
2183 struct btrfs_key key;
2184
2185 root = root->fs_info->chunk_root;
2186 path = btrfs_alloc_path();
2187 if (!path)
2188 return -ENOMEM;
2189
2190 key.objectid = chunk_objectid;
2191 key.offset = chunk_offset;
2192 key.type = BTRFS_CHUNK_ITEM_KEY;
2193
2194 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2195 if (ret < 0)
2196 goto out;
2197 else if (ret > 0) { /* Logic error or corruption */
2198 btrfs_error(root->fs_info, -ENOENT,
2199 "Failed lookup while freeing chunk.");
2200 ret = -ENOENT;
2201 goto out;
2202 }
2203
2204 ret = btrfs_del_item(trans, root, path);
2205 if (ret < 0)
2206 btrfs_error(root->fs_info, ret,
2207 "Failed to delete chunk item.");
2208 out:
2209 btrfs_free_path(path);
2210 return ret;
2211 }
2212
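/*
 * Remove the entry for the given chunk from the superblock's
 * sys_chunk_array by shifting the tail of the array over it and
 * shrinking the recorded array size.
 */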
2213 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
2214 chunk_offset)
2215 {
2216 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2217 struct btrfs_disk_key *disk_key;
2218 struct btrfs_chunk *chunk;
2219 u8 *ptr;
2220 int ret = 0;
2221 u32 num_stripes;
2222 u32 array_size;
2223 u32 len = 0;
2224 u32 cur;
2225 struct btrfs_key key;
2226
2227 array_size = btrfs_super_sys_array_size(super_copy);
2228
2229 ptr = super_copy->sys_chunk_array;
2230 cur = 0;
2231
2232 while (cur < array_size) {
2233 disk_key = (struct btrfs_disk_key *)ptr;
2234 btrfs_disk_key_to_cpu(&key, disk_key);
2235
2236 len = sizeof(*disk_key);
2237
2238 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2239 chunk = (struct btrfs_chunk *)(ptr + len);
2240 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2241 len += btrfs_chunk_item_size(num_stripes);
2242 } else {
2243 ret = -EIO;
2244 break;
2245 }
2246 if (key.objectid == chunk_objectid &&
2247 key.offset == chunk_offset) {
2248 memmove(ptr, ptr + len, array_size - (cur + len));
2249 array_size -= len;
2250 btrfs_set_super_sys_array_size(super_copy, array_size);
2251 } else {
2252 ptr += len;
2253 cur += len;
2254 }
2255 }
2256 return ret;
2257 }
2258
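/*
 * Relocate everything stored in one chunk and then tear the chunk down:
 * move the extents out, free the device extents, delete the chunk item
 * (and the sys_chunk_array entry for SYSTEM chunks), remove the block
 * group and drop the extent mapping.
 */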
2259 static int btrfs_relocate_chunk(struct btrfs_root *root,
2260 u64 chunk_tree, u64 chunk_objectid,
2261 u64 chunk_offset)
2262 {
2263 struct extent_map_tree *em_tree;
2264 struct btrfs_root *extent_root;
2265 struct btrfs_trans_handle *trans;
2266 struct extent_map *em;
2267 struct map_lookup *map;
2268 int ret;
2269 int i;
2270
2271 root = root->fs_info->chunk_root;
2272 extent_root = root->fs_info->extent_root;
2273 em_tree = &root->fs_info->mapping_tree.map_tree;
2274
2275 ret = btrfs_can_relocate(extent_root, chunk_offset);
2276 if (ret)
2277 return -ENOSPC;
2278
2279 /* step one, relocate all the extents inside this chunk */
2280 ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2281 if (ret)
2282 return ret;
2283
2284 trans = btrfs_start_transaction(root, 0);
2285 BUG_ON(IS_ERR(trans));
2286
2287 lock_chunks(root);
2288
2289 /*
2290 * step two, delete the device extents and the
2291 * chunk tree entries
2292 */
2293 read_lock(&em_tree->lock);
2294 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2295 read_unlock(&em_tree->lock);
2296
2297 BUG_ON(!em || em->start > chunk_offset ||
2298 em->start + em->len < chunk_offset);
2299 map = (struct map_lookup *)em->bdev;
2300
2301 for (i = 0; i < map->num_stripes; i++) {
2302 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2303 map->stripes[i].physical);
2304 BUG_ON(ret);
2305
2306 if (map->stripes[i].dev) {
2307 ret = btrfs_update_device(trans, map->stripes[i].dev);
2308 BUG_ON(ret);
2309 }
2310 }
2311 ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2312 chunk_offset);
2313
2314 BUG_ON(ret);
2315
2316 trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2317
2318 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2319 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2320 BUG_ON(ret);
2321 }
2322
2323 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2324 BUG_ON(ret);
2325
2326 write_lock(&em_tree->lock);
2327 remove_extent_mapping(em_tree, em);
2328 write_unlock(&em_tree->lock);
2329
2330 kfree(map);
2331 em->bdev = NULL;
2332
2333 /* once for the tree */
2334 free_extent_map(em);
2335 /* once for us */
2336 free_extent_map(em);
2337
2338 unlock_chunks(root);
2339 btrfs_end_transaction(trans, root);
2340 return 0;
2341 }
2342
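/*
 * Walk the chunk tree from the highest offset downwards and relocate
 * every SYSTEM chunk.  Chunks that fail with -ENOSPC are retried once
 * after the first pass completes.
 */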
2343 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2344 {
2345 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2346 struct btrfs_path *path;
2347 struct extent_buffer *leaf;
2348 struct btrfs_chunk *chunk;
2349 struct btrfs_key key;
2350 struct btrfs_key found_key;
2351 u64 chunk_tree = chunk_root->root_key.objectid;
2352 u64 chunk_type;
2353 bool retried = false;
2354 int failed = 0;
2355 int ret;
2356
2357 path = btrfs_alloc_path();
2358 if (!path)
2359 return -ENOMEM;
2360
2361 again:
2362 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2363 key.offset = (u64)-1;
2364 key.type = BTRFS_CHUNK_ITEM_KEY;
2365
2366 while (1) {
2367 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2368 if (ret < 0)
2369 goto error;
2370 BUG_ON(ret == 0); /* Corruption */
2371
2372 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2373 key.type);
2374 if (ret < 0)
2375 goto error;
2376 if (ret > 0)
2377 break;
2378
2379 leaf = path->nodes[0];
2380 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2381
2382 chunk = btrfs_item_ptr(leaf, path->slots[0],
2383 struct btrfs_chunk);
2384 chunk_type = btrfs_chunk_type(leaf, chunk);
2385 btrfs_release_path(path);
2386
2387 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2388 ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2389 found_key.objectid,
2390 found_key.offset);
2391 if (ret == -ENOSPC)
2392 failed++;
2393 else if (ret)
2394 BUG();
2395 }
2396
2397 if (found_key.offset == 0)
2398 break;
2399 key.offset = found_key.offset - 1;
2400 }
2401 ret = 0;
2402 if (failed && !retried) {
2403 failed = 0;
2404 retried = true;
2405 goto again;
2406 } else if (failed && retried) {
2407 WARN_ON(1);
2408 ret = -ENOSPC;
2409 }
2410 error:
2411 btrfs_free_path(path);
2412 return ret;
2413 }
2414
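/*
 * Persist @bctl as a balance item in the tree root so that an
 * interrupted balance can be resumed after a crash or remount.
 */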
2415 static int insert_balance_item(struct btrfs_root *root,
2416 struct btrfs_balance_control *bctl)
2417 {
2418 struct btrfs_trans_handle *trans;
2419 struct btrfs_balance_item *item;
2420 struct btrfs_disk_balance_args disk_bargs;
2421 struct btrfs_path *path;
2422 struct extent_buffer *leaf;
2423 struct btrfs_key key;
2424 int ret, err;
2425
2426 path = btrfs_alloc_path();
2427 if (!path)
2428 return -ENOMEM;
2429
2430 trans = btrfs_start_transaction(root, 0);
2431 if (IS_ERR(trans)) {
2432 btrfs_free_path(path);
2433 return PTR_ERR(trans);
2434 }
2435
2436 key.objectid = BTRFS_BALANCE_OBJECTID;
2437 key.type = BTRFS_BALANCE_ITEM_KEY;
2438 key.offset = 0;
2439
2440 ret = btrfs_insert_empty_item(trans, root, path, &key,
2441 sizeof(*item));
2442 if (ret)
2443 goto out;
2444
2445 leaf = path->nodes[0];
2446 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2447
2448 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2449
2450 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2451 btrfs_set_balance_data(leaf, item, &disk_bargs);
2452 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2453 btrfs_set_balance_meta(leaf, item, &disk_bargs);
2454 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2455 btrfs_set_balance_sys(leaf, item, &disk_bargs);
2456
2457 btrfs_set_balance_flags(leaf, item, bctl->flags);
2458
2459 btrfs_mark_buffer_dirty(leaf);
2460 out:
2461 btrfs_free_path(path);
2462 err = btrfs_commit_transaction(trans, root);
2463 if (err && !ret)
2464 ret = err;
2465 return ret;
2466 }
2467
2468 static int del_balance_item(struct btrfs_root *root)
2469 {
2470 struct btrfs_trans_handle *trans;
2471 struct btrfs_path *path;
2472 struct btrfs_key key;
2473 int ret, err;
2474
2475 path = btrfs_alloc_path();
2476 if (!path)
2477 return -ENOMEM;
2478
2479 trans = btrfs_start_transaction(root, 0);
2480 if (IS_ERR(trans)) {
2481 btrfs_free_path(path);
2482 return PTR_ERR(trans);
2483 }
2484
2485 key.objectid = BTRFS_BALANCE_OBJECTID;
2486 key.type = BTRFS_BALANCE_ITEM_KEY;
2487 key.offset = 0;
2488
2489 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2490 if (ret < 0)
2491 goto out;
2492 if (ret > 0) {
2493 ret = -ENOENT;
2494 goto out;
2495 }
2496
2497 ret = btrfs_del_item(trans, root, path);
2498 out:
2499 btrfs_free_path(path);
2500 err = btrfs_commit_transaction(trans, root);
2501 if (err && !ret)
2502 ret = err;
2503 return ret;
2504 }
2505
2506 /*
2507 * This is a heuristic used to reduce the number of chunks balanced on
2508 * resume after balance was interrupted.
2509 */
2510 static void update_balance_args(struct btrfs_balance_control *bctl)
2511 {
2512 /*
2513 * Turn on soft mode for chunk types that were being converted.
2514 */
2515 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2516 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2517 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2518 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2519 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2520 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2521
2522 /*
2523 	 * Turn on the usage filter if it is not already in use. The idea is
2524 * that chunks that we have already balanced should be
2525 * reasonably full. Don't do it for chunks that are being
2526 * converted - that will keep us from relocating unconverted
2527 * (albeit full) chunks.
2528 */
2529 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2530 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2531 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2532 bctl->data.usage = 90;
2533 }
2534 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2535 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2536 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2537 bctl->sys.usage = 90;
2538 }
2539 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2540 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2541 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2542 bctl->meta.usage = 90;
2543 }
2544 }
2545
2546 /*
2547 * Should be called with both balance and volume mutexes held to
2548 * serialize other volume operations (add_dev/rm_dev/resize) with
2549 * restriper. Same goes for unset_balance_control.
2550 */
2551 static void set_balance_control(struct btrfs_balance_control *bctl)
2552 {
2553 struct btrfs_fs_info *fs_info = bctl->fs_info;
2554
2555 BUG_ON(fs_info->balance_ctl);
2556
2557 spin_lock(&fs_info->balance_lock);
2558 fs_info->balance_ctl = bctl;
2559 spin_unlock(&fs_info->balance_lock);
2560 }
2561
2562 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2563 {
2564 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2565
2566 BUG_ON(!fs_info->balance_ctl);
2567
2568 spin_lock(&fs_info->balance_lock);
2569 fs_info->balance_ctl = NULL;
2570 spin_unlock(&fs_info->balance_lock);
2571
2572 kfree(bctl);
2573 }
2574
2575 /*
2576  * Balance filters. Return 1 if the chunk should be filtered out
2577 * (should not be balanced).
2578 */
2579 static int chunk_profiles_filter(u64 chunk_type,
2580 struct btrfs_balance_args *bargs)
2581 {
2582 chunk_type = chunk_to_extended(chunk_type) &
2583 BTRFS_EXTENDED_PROFILE_MASK;
2584
2585 if (bargs->profiles & chunk_type)
2586 return 0;
2587
2588 return 1;
2589 }
2590
2591 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2592 struct btrfs_balance_args *bargs)
2593 {
2594 struct btrfs_block_group_cache *cache;
2595 u64 chunk_used, user_thresh;
2596 int ret = 1;
2597
2598 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2599 chunk_used = btrfs_block_group_used(&cache->item);
2600
2601 user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
2602 if (chunk_used < user_thresh)
2603 ret = 0;
2604
2605 btrfs_put_block_group(cache);
2606 return ret;
2607 }
2608
2609 static int chunk_devid_filter(struct extent_buffer *leaf,
2610 struct btrfs_chunk *chunk,
2611 struct btrfs_balance_args *bargs)
2612 {
2613 struct btrfs_stripe *stripe;
2614 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2615 int i;
2616
2617 for (i = 0; i < num_stripes; i++) {
2618 stripe = btrfs_stripe_nr(chunk, i);
2619 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2620 return 0;
2621 }
2622
2623 return 1;
2624 }
2625
2626 /* [pstart, pend) */
2627 static int chunk_drange_filter(struct extent_buffer *leaf,
2628 struct btrfs_chunk *chunk,
2629 u64 chunk_offset,
2630 struct btrfs_balance_args *bargs)
2631 {
2632 struct btrfs_stripe *stripe;
2633 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2634 u64 stripe_offset;
2635 u64 stripe_length;
2636 int factor;
2637 int i;
2638
2639 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2640 return 0;
2641
2642 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2643 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
2644 factor = 2;
2645 else
2646 factor = 1;
2647 factor = num_stripes / factor;
2648
2649 for (i = 0; i < num_stripes; i++) {
2650 stripe = btrfs_stripe_nr(chunk, i);
2651 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2652 continue;
2653
2654 stripe_offset = btrfs_stripe_offset(leaf, stripe);
2655 stripe_length = btrfs_chunk_length(leaf, chunk);
2656 do_div(stripe_length, factor);
2657
2658 if (stripe_offset < bargs->pend &&
2659 stripe_offset + stripe_length > bargs->pstart)
2660 return 0;
2661 }
2662
2663 return 1;
2664 }
2665
2666 /* [vstart, vend) */
2667 static int chunk_vrange_filter(struct extent_buffer *leaf,
2668 struct btrfs_chunk *chunk,
2669 u64 chunk_offset,
2670 struct btrfs_balance_args *bargs)
2671 {
2672 if (chunk_offset < bargs->vend &&
2673 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2674 /* at least part of the chunk is inside this vrange */
2675 return 0;
2676
2677 return 1;
2678 }
2679
2680 static int chunk_soft_convert_filter(u64 chunk_type,
2681 struct btrfs_balance_args *bargs)
2682 {
2683 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2684 return 0;
2685
2686 chunk_type = chunk_to_extended(chunk_type) &
2687 BTRFS_EXTENDED_PROFILE_MASK;
2688
2689 if (bargs->target == chunk_type)
2690 return 1;
2691
2692 return 0;
2693 }
2694
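/*
 * Run the configured balance filters against one chunk.  Returns 1 if
 * the chunk should be relocated, 0 if it is filtered out.
 */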
2695 static int should_balance_chunk(struct btrfs_root *root,
2696 struct extent_buffer *leaf,
2697 struct btrfs_chunk *chunk, u64 chunk_offset)
2698 {
2699 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2700 struct btrfs_balance_args *bargs = NULL;
2701 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2702
2703 /* type filter */
2704 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2705 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2706 return 0;
2707 }
2708
2709 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2710 bargs = &bctl->data;
2711 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2712 bargs = &bctl->sys;
2713 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2714 bargs = &bctl->meta;
2715
2716 /* profiles filter */
2717 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2718 chunk_profiles_filter(chunk_type, bargs)) {
2719 return 0;
2720 }
2721
2722 /* usage filter */
2723 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2724 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2725 return 0;
2726 }
2727
2728 /* devid filter */
2729 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2730 chunk_devid_filter(leaf, chunk, bargs)) {
2731 return 0;
2732 }
2733
2734 /* drange filter, makes sense only with devid filter */
2735 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2736 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2737 return 0;
2738 }
2739
2740 /* vrange filter */
2741 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2742 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2743 return 0;
2744 }
2745
2746 /* soft profile changing mode */
2747 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2748 chunk_soft_convert_filter(chunk_type, bargs)) {
2749 return 0;
2750 }
2751
2752 return 1;
2753 }
2754
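/*
 * The main balance loop.  Step one shrinks and regrows each device to
 * make a little room; step two walks the chunk tree twice, first a
 * counting pass that only fills in the "expected" statistics and then a
 * second pass that actually relocates every chunk the filters accept.
 */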
2755 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2756 {
2757 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2758 struct btrfs_root *chunk_root = fs_info->chunk_root;
2759 struct btrfs_root *dev_root = fs_info->dev_root;
2760 struct list_head *devices;
2761 struct btrfs_device *device;
2762 u64 old_size;
2763 u64 size_to_free;
2764 struct btrfs_chunk *chunk;
2765 struct btrfs_path *path;
2766 struct btrfs_key key;
2767 struct btrfs_key found_key;
2768 struct btrfs_trans_handle *trans;
2769 struct extent_buffer *leaf;
2770 int slot;
2771 int ret;
2772 int enospc_errors = 0;
2773 bool counting = true;
2774
2775 /* step one make some room on all the devices */
2776 devices = &fs_info->fs_devices->devices;
2777 list_for_each_entry(device, devices, dev_list) {
2778 old_size = device->total_bytes;
2779 size_to_free = div_factor(old_size, 1);
2780 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2781 if (!device->writeable ||
2782 device->total_bytes - device->bytes_used > size_to_free ||
2783 device->is_tgtdev_for_dev_replace)
2784 continue;
2785
2786 ret = btrfs_shrink_device(device, old_size - size_to_free);
2787 if (ret == -ENOSPC)
2788 break;
2789 BUG_ON(ret);
2790
2791 trans = btrfs_start_transaction(dev_root, 0);
2792 BUG_ON(IS_ERR(trans));
2793
2794 ret = btrfs_grow_device(trans, device, old_size);
2795 BUG_ON(ret);
2796
2797 btrfs_end_transaction(trans, dev_root);
2798 }
2799
2800 /* step two, relocate all the chunks */
2801 path = btrfs_alloc_path();
2802 if (!path) {
2803 ret = -ENOMEM;
2804 goto error;
2805 }
2806
2807 /* zero out stat counters */
2808 spin_lock(&fs_info->balance_lock);
2809 memset(&bctl->stat, 0, sizeof(bctl->stat));
2810 spin_unlock(&fs_info->balance_lock);
2811 again:
2812 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2813 key.offset = (u64)-1;
2814 key.type = BTRFS_CHUNK_ITEM_KEY;
2815
2816 while (1) {
2817 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2818 atomic_read(&fs_info->balance_cancel_req)) {
2819 ret = -ECANCELED;
2820 goto error;
2821 }
2822
2823 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2824 if (ret < 0)
2825 goto error;
2826
2827 /*
2828 		 * this shouldn't happen; it means the last relocation
2829 		 * failed
2830 */
2831 if (ret == 0)
2832 BUG(); /* FIXME break ? */
2833
2834 ret = btrfs_previous_item(chunk_root, path, 0,
2835 BTRFS_CHUNK_ITEM_KEY);
2836 if (ret) {
2837 ret = 0;
2838 break;
2839 }
2840
2841 leaf = path->nodes[0];
2842 slot = path->slots[0];
2843 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2844
2845 if (found_key.objectid != key.objectid)
2846 break;
2847
2848 /* chunk zero is special */
2849 if (found_key.offset == 0)
2850 break;
2851
2852 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2853
2854 if (!counting) {
2855 spin_lock(&fs_info->balance_lock);
2856 bctl->stat.considered++;
2857 spin_unlock(&fs_info->balance_lock);
2858 }
2859
2860 ret = should_balance_chunk(chunk_root, leaf, chunk,
2861 found_key.offset);
2862 btrfs_release_path(path);
2863 if (!ret)
2864 goto loop;
2865
2866 if (counting) {
2867 spin_lock(&fs_info->balance_lock);
2868 bctl->stat.expected++;
2869 spin_unlock(&fs_info->balance_lock);
2870 goto loop;
2871 }
2872
2873 ret = btrfs_relocate_chunk(chunk_root,
2874 chunk_root->root_key.objectid,
2875 found_key.objectid,
2876 found_key.offset);
2877 if (ret && ret != -ENOSPC)
2878 goto error;
2879 if (ret == -ENOSPC) {
2880 enospc_errors++;
2881 } else {
2882 spin_lock(&fs_info->balance_lock);
2883 bctl->stat.completed++;
2884 spin_unlock(&fs_info->balance_lock);
2885 }
2886 loop:
2887 key.offset = found_key.offset - 1;
2888 }
2889
2890 if (counting) {
2891 btrfs_release_path(path);
2892 counting = false;
2893 goto again;
2894 }
2895 error:
2896 btrfs_free_path(path);
2897 if (enospc_errors) {
2898 printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
2899 enospc_errors);
2900 if (!ret)
2901 ret = -ENOSPC;
2902 }
2903
2904 return ret;
2905 }
2906
2907 /**
2908 * alloc_profile_is_valid - see if a given profile is valid and reduced
2909 * @flags: profile to validate
2910 * @extended: if true @flags is treated as an extended profile
2911 */
2912 static int alloc_profile_is_valid(u64 flags, int extended)
2913 {
2914 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
2915 BTRFS_BLOCK_GROUP_PROFILE_MASK);
2916
2917 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
2918
2919 /* 1) check that all other bits are zeroed */
2920 if (flags & ~mask)
2921 return 0;
2922
2923 /* 2) see if profile is reduced */
2924 if (flags == 0)
2925 return !extended; /* "0" is valid for usual profiles */
2926
2927 /* true if exactly one bit set */
2928 return (flags & (flags - 1)) == 0;
2929 }
2930
2931 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2932 {
2933 /* cancel requested || normal exit path */
2934 return atomic_read(&fs_info->balance_cancel_req) ||
2935 (atomic_read(&fs_info->balance_pause_req) == 0 &&
2936 atomic_read(&fs_info->balance_cancel_req) == 0);
2937 }
2938
2939 static void __cancel_balance(struct btrfs_fs_info *fs_info)
2940 {
2941 int ret;
2942
2943 unset_balance_control(fs_info);
2944 ret = del_balance_item(fs_info->tree_root);
2945 BUG_ON(ret);
2946 }
2947
2948 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
2949 struct btrfs_ioctl_balance_args *bargs);
2950
2951 /*
2952 * Should be called with both balance and volume mutexes held
2953 */
2954 int btrfs_balance(struct btrfs_balance_control *bctl,
2955 struct btrfs_ioctl_balance_args *bargs)
2956 {
2957 struct btrfs_fs_info *fs_info = bctl->fs_info;
2958 u64 allowed;
2959 int mixed = 0;
2960 int ret;
2961 u64 num_devices;
2962
2963 if (btrfs_fs_closing(fs_info) ||
2964 atomic_read(&fs_info->balance_pause_req) ||
2965 atomic_read(&fs_info->balance_cancel_req)) {
2966 ret = -EINVAL;
2967 goto out;
2968 }
2969
2970 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2971 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
2972 mixed = 1;
2973
2974 /*
2975 * In case of mixed groups both data and meta should be picked,
2976 * and identical options should be given for both of them.
2977 */
2978 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
2979 if (mixed && (bctl->flags & allowed)) {
2980 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
2981 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
2982 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
2983 printk(KERN_ERR "btrfs: with mixed groups data and "
2984 "metadata balance options must be the same\n");
2985 ret = -EINVAL;
2986 goto out;
2987 }
2988 }
2989
2990 num_devices = fs_info->fs_devices->num_devices;
2991 btrfs_dev_replace_lock(&fs_info->dev_replace);
2992 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2993 BUG_ON(num_devices < 1);
2994 num_devices--;
2995 }
2996 btrfs_dev_replace_unlock(&fs_info->dev_replace);
2997 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2998 if (num_devices == 1)
2999 allowed |= BTRFS_BLOCK_GROUP_DUP;
3000 else if (num_devices < 4)
3001 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3002 else
3003 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
3004 BTRFS_BLOCK_GROUP_RAID10);
3005
3006 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3007 (!alloc_profile_is_valid(bctl->data.target, 1) ||
3008 (bctl->data.target & ~allowed))) {
3009 printk(KERN_ERR "btrfs: unable to start balance with target "
3010 "data profile %llu\n",
3011 (unsigned long long)bctl->data.target);
3012 ret = -EINVAL;
3013 goto out;
3014 }
3015 if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3016 (!alloc_profile_is_valid(bctl->meta.target, 1) ||
3017 (bctl->meta.target & ~allowed))) {
3018 printk(KERN_ERR "btrfs: unable to start balance with target "
3019 "metadata profile %llu\n",
3020 (unsigned long long)bctl->meta.target);
3021 ret = -EINVAL;
3022 goto out;
3023 }
3024 if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3025 (!alloc_profile_is_valid(bctl->sys.target, 1) ||
3026 (bctl->sys.target & ~allowed))) {
3027 printk(KERN_ERR "btrfs: unable to start balance with target "
3028 "system profile %llu\n",
3029 (unsigned long long)bctl->sys.target);
3030 ret = -EINVAL;
3031 goto out;
3032 }
3033
3034 /* allow dup'ed data chunks only in mixed mode */
3035 if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3036 (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
3037 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
3038 ret = -EINVAL;
3039 goto out;
3040 }
3041
3042 /* allow to reduce meta or sys integrity only if force set */
3043 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3044 BTRFS_BLOCK_GROUP_RAID10;
3045 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3046 (fs_info->avail_system_alloc_bits & allowed) &&
3047 !(bctl->sys.target & allowed)) ||
3048 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3049 (fs_info->avail_metadata_alloc_bits & allowed) &&
3050 !(bctl->meta.target & allowed))) {
3051 if (bctl->flags & BTRFS_BALANCE_FORCE) {
3052 printk(KERN_INFO "btrfs: force reducing metadata "
3053 "integrity\n");
3054 } else {
3055 printk(KERN_ERR "btrfs: balance will reduce metadata "
3056 "integrity, use force if you want this\n");
3057 ret = -EINVAL;
3058 goto out;
3059 }
3060 }
3061
3062 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3063 int num_tolerated_disk_barrier_failures;
3064 u64 target = bctl->sys.target;
3065
3066 num_tolerated_disk_barrier_failures =
3067 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3068 if (num_tolerated_disk_barrier_failures > 0 &&
3069 (target &
3070 (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3071 BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
3072 num_tolerated_disk_barrier_failures = 0;
3073 else if (num_tolerated_disk_barrier_failures > 1 &&
3074 (target &
3075 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
3076 num_tolerated_disk_barrier_failures = 1;
3077
3078 fs_info->num_tolerated_disk_barrier_failures =
3079 num_tolerated_disk_barrier_failures;
3080 }
3081
3082 ret = insert_balance_item(fs_info->tree_root, bctl);
3083 if (ret && ret != -EEXIST)
3084 goto out;
3085
3086 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3087 BUG_ON(ret == -EEXIST);
3088 set_balance_control(bctl);
3089 } else {
3090 BUG_ON(ret != -EEXIST);
3091 spin_lock(&fs_info->balance_lock);
3092 update_balance_args(bctl);
3093 spin_unlock(&fs_info->balance_lock);
3094 }
3095
3096 atomic_inc(&fs_info->balance_running);
3097 mutex_unlock(&fs_info->balance_mutex);
3098
3099 ret = __btrfs_balance(fs_info);
3100
3101 mutex_lock(&fs_info->balance_mutex);
3102 atomic_dec(&fs_info->balance_running);
3103
3104 if (bargs) {
3105 memset(bargs, 0, sizeof(*bargs));
3106 update_ioctl_balance_args(fs_info, 0, bargs);
3107 }
3108
3109 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3110 balance_need_close(fs_info)) {
3111 __cancel_balance(fs_info);
3112 }
3113
3114 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3115 fs_info->num_tolerated_disk_barrier_failures =
3116 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3117 }
3118
3119 wake_up(&fs_info->balance_wait_q);
3120
3121 return ret;
3122 out:
3123 if (bctl->flags & BTRFS_BALANCE_RESUME)
3124 __cancel_balance(fs_info);
3125 else
3126 kfree(bctl);
3127 return ret;
3128 }
3129
3130 static int balance_kthread(void *data)
3131 {
3132 struct btrfs_fs_info *fs_info = data;
3133 int ret = 0;
3134
3135 mutex_lock(&fs_info->volume_mutex);
3136 mutex_lock(&fs_info->balance_mutex);
3137
3138 if (fs_info->balance_ctl) {
3139 printk(KERN_INFO "btrfs: continuing balance\n");
3140 ret = btrfs_balance(fs_info->balance_ctl, NULL);
3141 }
3142
3143 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3144 mutex_unlock(&fs_info->balance_mutex);
3145 mutex_unlock(&fs_info->volume_mutex);
3146
3147 return ret;
3148 }
3149
3150 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3151 {
3152 struct task_struct *tsk;
3153
3154 spin_lock(&fs_info->balance_lock);
3155 if (!fs_info->balance_ctl) {
3156 spin_unlock(&fs_info->balance_lock);
3157 return 0;
3158 }
3159 spin_unlock(&fs_info->balance_lock);
3160
3161 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3162 printk(KERN_INFO "btrfs: force skipping balance\n");
3163 return 0;
3164 }
3165
3166 WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3167 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3168 if (IS_ERR(tsk))
3169 return PTR_ERR(tsk);
3170
3171 return 0;
3172 }
3173
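/*
 * Read a previously stored balance item at mount time and rebuild the
 * in-memory balance control from it so the balance can be resumed.
 */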
3174 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3175 {
3176 struct btrfs_balance_control *bctl;
3177 struct btrfs_balance_item *item;
3178 struct btrfs_disk_balance_args disk_bargs;
3179 struct btrfs_path *path;
3180 struct extent_buffer *leaf;
3181 struct btrfs_key key;
3182 int ret;
3183
3184 path = btrfs_alloc_path();
3185 if (!path)
3186 return -ENOMEM;
3187
3188 key.objectid = BTRFS_BALANCE_OBJECTID;
3189 key.type = BTRFS_BALANCE_ITEM_KEY;
3190 key.offset = 0;
3191
3192 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3193 if (ret < 0)
3194 goto out;
3195 if (ret > 0) { /* ret = -ENOENT; */
3196 ret = 0;
3197 goto out;
3198 }
3199
3200 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3201 if (!bctl) {
3202 ret = -ENOMEM;
3203 goto out;
3204 }
3205
3206 leaf = path->nodes[0];
3207 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3208
3209 bctl->fs_info = fs_info;
3210 bctl->flags = btrfs_balance_flags(leaf, item);
3211 bctl->flags |= BTRFS_BALANCE_RESUME;
3212
3213 btrfs_balance_data(leaf, item, &disk_bargs);
3214 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3215 btrfs_balance_meta(leaf, item, &disk_bargs);
3216 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3217 btrfs_balance_sys(leaf, item, &disk_bargs);
3218 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3219
3220 mutex_lock(&fs_info->volume_mutex);
3221 mutex_lock(&fs_info->balance_mutex);
3222
3223 set_balance_control(bctl);
3224
3225 mutex_unlock(&fs_info->balance_mutex);
3226 mutex_unlock(&fs_info->volume_mutex);
3227 out:
3228 btrfs_free_path(path);
3229 return ret;
3230 }
3231
3232 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3233 {
3234 int ret = 0;
3235
3236 mutex_lock(&fs_info->balance_mutex);
3237 if (!fs_info->balance_ctl) {
3238 mutex_unlock(&fs_info->balance_mutex);
3239 return -ENOTCONN;
3240 }
3241
3242 if (atomic_read(&fs_info->balance_running)) {
3243 atomic_inc(&fs_info->balance_pause_req);
3244 mutex_unlock(&fs_info->balance_mutex);
3245
3246 wait_event(fs_info->balance_wait_q,
3247 atomic_read(&fs_info->balance_running) == 0);
3248
3249 mutex_lock(&fs_info->balance_mutex);
3250 /* we are good with balance_ctl ripped off from under us */
3251 BUG_ON(atomic_read(&fs_info->balance_running));
3252 atomic_dec(&fs_info->balance_pause_req);
3253 } else {
3254 ret = -ENOTCONN;
3255 }
3256
3257 mutex_unlock(&fs_info->balance_mutex);
3258 return ret;
3259 }
3260
3261 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3262 {
3263 mutex_lock(&fs_info->balance_mutex);
3264 if (!fs_info->balance_ctl) {
3265 mutex_unlock(&fs_info->balance_mutex);
3266 return -ENOTCONN;
3267 }
3268
3269 atomic_inc(&fs_info->balance_cancel_req);
3270 /*
3271 	 * if a balance is currently running, just wait and return; the
3272 	 * balance item is deleted in btrfs_balance in this case
3273 */
3274 if (atomic_read(&fs_info->balance_running)) {
3275 mutex_unlock(&fs_info->balance_mutex);
3276 wait_event(fs_info->balance_wait_q,
3277 atomic_read(&fs_info->balance_running) == 0);
3278 mutex_lock(&fs_info->balance_mutex);
3279 } else {
3280 /* __cancel_balance needs volume_mutex */
3281 mutex_unlock(&fs_info->balance_mutex);
3282 mutex_lock(&fs_info->volume_mutex);
3283 mutex_lock(&fs_info->balance_mutex);
3284
3285 if (fs_info->balance_ctl)
3286 __cancel_balance(fs_info);
3287
3288 mutex_unlock(&fs_info->volume_mutex);
3289 }
3290
3291 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3292 atomic_dec(&fs_info->balance_cancel_req);
3293 mutex_unlock(&fs_info->balance_mutex);
3294 return 0;
3295 }
3296
3297 /*
3298  * shrinking a device means finding all of the device extents past
3299  * the new size, and then following the back refs to the chunks.
3300  * The chunk relocation code actually frees the device extents.
3301 */
3302 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3303 {
3304 struct btrfs_trans_handle *trans;
3305 struct btrfs_root *root = device->dev_root;
3306 struct btrfs_dev_extent *dev_extent = NULL;
3307 struct btrfs_path *path;
3308 u64 length;
3309 u64 chunk_tree;
3310 u64 chunk_objectid;
3311 u64 chunk_offset;
3312 int ret;
3313 int slot;
3314 int failed = 0;
3315 bool retried = false;
3316 struct extent_buffer *l;
3317 struct btrfs_key key;
3318 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3319 u64 old_total = btrfs_super_total_bytes(super_copy);
3320 u64 old_size = device->total_bytes;
3321 u64 diff = device->total_bytes - new_size;
3322
3323 if (device->is_tgtdev_for_dev_replace)
3324 return -EINVAL;
3325
3326 path = btrfs_alloc_path();
3327 if (!path)
3328 return -ENOMEM;
3329
3330 path->reada = 2;
3331
3332 lock_chunks(root);
3333
3334 device->total_bytes = new_size;
3335 if (device->writeable) {
3336 device->fs_devices->total_rw_bytes -= diff;
3337 spin_lock(&root->fs_info->free_chunk_lock);
3338 root->fs_info->free_chunk_space -= diff;
3339 spin_unlock(&root->fs_info->free_chunk_lock);
3340 }
3341 unlock_chunks(root);
3342
3343 again:
3344 key.objectid = device->devid;
3345 key.offset = (u64)-1;
3346 key.type = BTRFS_DEV_EXTENT_KEY;
3347
3348 do {
3349 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3350 if (ret < 0)
3351 goto done;
3352
3353 ret = btrfs_previous_item(root, path, 0, key.type);
3354 if (ret < 0)
3355 goto done;
3356 if (ret) {
3357 ret = 0;
3358 btrfs_release_path(path);
3359 break;
3360 }
3361
3362 l = path->nodes[0];
3363 slot = path->slots[0];
3364 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3365
3366 if (key.objectid != device->devid) {
3367 btrfs_release_path(path);
3368 break;
3369 }
3370
3371 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3372 length = btrfs_dev_extent_length(l, dev_extent);
3373
3374 if (key.offset + length <= new_size) {
3375 btrfs_release_path(path);
3376 break;
3377 }
3378
3379 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3380 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3381 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3382 btrfs_release_path(path);
3383
3384 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3385 chunk_offset);
3386 if (ret && ret != -ENOSPC)
3387 goto done;
3388 if (ret == -ENOSPC)
3389 failed++;
3390 } while (key.offset-- > 0);
3391
3392 if (failed && !retried) {
3393 failed = 0;
3394 retried = true;
3395 goto again;
3396 } else if (failed && retried) {
3397 ret = -ENOSPC;
3398 lock_chunks(root);
3399
3400 device->total_bytes = old_size;
3401 if (device->writeable)
3402 device->fs_devices->total_rw_bytes += diff;
3403 spin_lock(&root->fs_info->free_chunk_lock);
3404 root->fs_info->free_chunk_space += diff;
3405 spin_unlock(&root->fs_info->free_chunk_lock);
3406 unlock_chunks(root);
3407 goto done;
3408 }
3409
3410 /* Shrinking succeeded, else we would be at "done". */
3411 trans = btrfs_start_transaction(root, 0);
3412 if (IS_ERR(trans)) {
3413 ret = PTR_ERR(trans);
3414 goto done;
3415 }
3416
3417 lock_chunks(root);
3418
3419 device->disk_total_bytes = new_size;
3420 /* Now btrfs_update_device() will change the on-disk size. */
3421 ret = btrfs_update_device(trans, device);
3422 if (ret) {
3423 unlock_chunks(root);
3424 btrfs_end_transaction(trans, root);
3425 goto done;
3426 }
3427 WARN_ON(diff > old_total);
3428 btrfs_set_super_total_bytes(super_copy, old_total - diff);
3429 unlock_chunks(root);
3430 btrfs_end_transaction(trans, root);
3431 done:
3432 btrfs_free_path(path);
3433 return ret;
3434 }
3435
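/*
 * Append a (disk key, chunk item) pair to the superblock's
 * sys_chunk_array so that SYSTEM chunks can be mapped before the chunk
 * tree itself is readable.
 */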
3436 static int btrfs_add_system_chunk(struct btrfs_root *root,
3437 struct btrfs_key *key,
3438 struct btrfs_chunk *chunk, int item_size)
3439 {
3440 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3441 struct btrfs_disk_key disk_key;
3442 u32 array_size;
3443 u8 *ptr;
3444
3445 array_size = btrfs_super_sys_array_size(super_copy);
3446 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3447 return -EFBIG;
3448
3449 ptr = super_copy->sys_chunk_array + array_size;
3450 btrfs_cpu_key_to_disk(&disk_key, key);
3451 memcpy(ptr, &disk_key, sizeof(disk_key));
3452 ptr += sizeof(disk_key);
3453 memcpy(ptr, chunk, item_size);
3454 item_size += sizeof(disk_key);
3455 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3456 return 0;
3457 }
3458
3459 /*
3460 * sort the devices in descending order by max_avail, total_avail
3461 */
3462 static int btrfs_cmp_device_info(const void *a, const void *b)
3463 {
3464 const struct btrfs_device_info *di_a = a;
3465 const struct btrfs_device_info *di_b = b;
3466
3467 if (di_a->max_avail > di_b->max_avail)
3468 return -1;
3469 if (di_a->max_avail < di_b->max_avail)
3470 return 1;
3471 if (di_a->total_avail > di_b->total_avail)
3472 return -1;
3473 if (di_a->total_avail < di_b->total_avail)
3474 return 1;
3475 return 0;
3476 }
3477
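/*
 * First phase of chunk allocation: pick the stripe geometry for @type,
 * choose the devices with the largest free holes, insert the new extent
 * mapping and block group, and reserve a device extent on every stripe.
 * The chunk item itself is written later by __finish_chunk_alloc().
 */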
3478 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3479 struct btrfs_root *extent_root,
3480 struct map_lookup **map_ret,
3481 u64 *num_bytes_out, u64 *stripe_size_out,
3482 u64 start, u64 type)
3483 {
3484 struct btrfs_fs_info *info = extent_root->fs_info;
3485 struct btrfs_fs_devices *fs_devices = info->fs_devices;
3486 struct list_head *cur;
3487 struct map_lookup *map = NULL;
3488 struct extent_map_tree *em_tree;
3489 struct extent_map *em;
3490 struct btrfs_device_info *devices_info = NULL;
3491 u64 total_avail;
3492 int num_stripes; /* total number of stripes to allocate */
3493 int sub_stripes; /* sub_stripes info for map */
3494 int dev_stripes; /* stripes per dev */
3495 int devs_max; /* max devs to use */
3496 int devs_min; /* min devs needed */
3497 int devs_increment; /* ndevs has to be a multiple of this */
3498 	int ncopies;		/* how many copies of the data there are */
3499 int ret;
3500 u64 max_stripe_size;
3501 u64 max_chunk_size;
3502 u64 stripe_size;
3503 u64 num_bytes;
3504 int ndevs;
3505 int i;
3506 int j;
3507
3508 BUG_ON(!alloc_profile_is_valid(type, 0));
3509
3510 if (list_empty(&fs_devices->alloc_list))
3511 return -ENOSPC;
3512
3513 sub_stripes = 1;
3514 dev_stripes = 1;
3515 devs_increment = 1;
3516 ncopies = 1;
3517 devs_max = 0; /* 0 == as many as possible */
3518 devs_min = 1;
3519
3520 /*
3521 * define the properties of each RAID type.
3522 * FIXME: move this to a global table and use it in all RAID
3523 * calculation code
3524 */
3525 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
3526 dev_stripes = 2;
3527 ncopies = 2;
3528 devs_max = 1;
3529 } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
3530 devs_min = 2;
3531 } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
3532 devs_increment = 2;
3533 ncopies = 2;
3534 devs_max = 2;
3535 devs_min = 2;
3536 } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
3537 sub_stripes = 2;
3538 devs_increment = 2;
3539 ncopies = 2;
3540 devs_min = 4;
3541 } else {
3542 devs_max = 1;
3543 }
3544
3545 if (type & BTRFS_BLOCK_GROUP_DATA) {
3546 max_stripe_size = 1024 * 1024 * 1024;
3547 max_chunk_size = 10 * max_stripe_size;
3548 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3549 /* for larger filesystems, use larger metadata chunks */
3550 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3551 max_stripe_size = 1024 * 1024 * 1024;
3552 else
3553 max_stripe_size = 256 * 1024 * 1024;
3554 max_chunk_size = max_stripe_size;
3555 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3556 max_stripe_size = 32 * 1024 * 1024;
3557 max_chunk_size = 2 * max_stripe_size;
3558 } else {
3559 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3560 type);
3561 BUG_ON(1);
3562 }
3563
3564 /* we don't want a chunk larger than 10% of writeable space */
3565 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3566 max_chunk_size);
3567
3568 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3569 GFP_NOFS);
3570 if (!devices_info)
3571 return -ENOMEM;
3572
3573 cur = fs_devices->alloc_list.next;
3574
3575 /*
3576 * in the first pass through the devices list, we gather information
3577 * about the available holes on each device.
3578 */
3579 ndevs = 0;
3580 while (cur != &fs_devices->alloc_list) {
3581 struct btrfs_device *device;
3582 u64 max_avail;
3583 u64 dev_offset;
3584
3585 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3586
3587 cur = cur->next;
3588
3589 if (!device->writeable) {
3590 WARN(1, KERN_ERR
3591 "btrfs: read-only device in alloc_list\n");
3592 continue;
3593 }
3594
3595 if (!device->in_fs_metadata ||
3596 device->is_tgtdev_for_dev_replace)
3597 continue;
3598
3599 if (device->total_bytes > device->bytes_used)
3600 total_avail = device->total_bytes - device->bytes_used;
3601 else
3602 total_avail = 0;
3603
3604 /* If there is no space on this device, skip it. */
3605 if (total_avail == 0)
3606 continue;
3607
3608 ret = find_free_dev_extent(device,
3609 max_stripe_size * dev_stripes,
3610 &dev_offset, &max_avail);
3611 if (ret && ret != -ENOSPC)
3612 goto error;
3613
3614 if (ret == 0)
3615 max_avail = max_stripe_size * dev_stripes;
3616
3617 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3618 continue;
3619
3620 devices_info[ndevs].dev_offset = dev_offset;
3621 devices_info[ndevs].max_avail = max_avail;
3622 devices_info[ndevs].total_avail = total_avail;
3623 devices_info[ndevs].dev = device;
3624 ++ndevs;
3625 WARN_ON(ndevs > fs_devices->rw_devices);
3626 }
3627
3628 /*
3629 * now sort the devices by hole size / available space
3630 */
3631 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3632 btrfs_cmp_device_info, NULL);
3633
3634 /* round down to number of usable stripes */
3635 ndevs -= ndevs % devs_increment;
3636
3637 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3638 ret = -ENOSPC;
3639 goto error;
3640 }
3641
3642 if (devs_max && ndevs > devs_max)
3643 ndevs = devs_max;
3644 /*
3645 * the primary goal is to maximize the number of stripes, so use as many
3646 * devices as possible, even if the stripes are not maximum sized.
3647 */
3648 stripe_size = devices_info[ndevs-1].max_avail;
3649 num_stripes = ndevs * dev_stripes;
3650
3651 if (stripe_size * ndevs > max_chunk_size * ncopies) {
3652 stripe_size = max_chunk_size * ncopies;
3653 do_div(stripe_size, ndevs);
3654 }
3655
3656 do_div(stripe_size, dev_stripes);
3657
3658 /* align to BTRFS_STRIPE_LEN */
3659 do_div(stripe_size, BTRFS_STRIPE_LEN);
3660 stripe_size *= BTRFS_STRIPE_LEN;
3661
3662 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3663 if (!map) {
3664 ret = -ENOMEM;
3665 goto error;
3666 }
3667 map->num_stripes = num_stripes;
3668
3669 for (i = 0; i < ndevs; ++i) {
3670 for (j = 0; j < dev_stripes; ++j) {
3671 int s = i * dev_stripes + j;
3672 map->stripes[s].dev = devices_info[i].dev;
3673 map->stripes[s].physical = devices_info[i].dev_offset +
3674 j * stripe_size;
3675 }
3676 }
3677 map->sector_size = extent_root->sectorsize;
3678 map->stripe_len = BTRFS_STRIPE_LEN;
3679 map->io_align = BTRFS_STRIPE_LEN;
3680 map->io_width = BTRFS_STRIPE_LEN;
3681 map->type = type;
3682 map->sub_stripes = sub_stripes;
3683
3684 *map_ret = map;
3685 num_bytes = stripe_size * (num_stripes / ncopies);
3686
3687 *stripe_size_out = stripe_size;
3688 *num_bytes_out = num_bytes;
3689
3690 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3691
3692 em = alloc_extent_map();
3693 if (!em) {
3694 ret = -ENOMEM;
3695 goto error;
3696 }
3697 em->bdev = (struct block_device *)map;
3698 em->start = start;
3699 em->len = num_bytes;
3700 em->block_start = 0;
3701 em->block_len = em->len;
3702
3703 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3704 write_lock(&em_tree->lock);
3705 ret = add_extent_mapping(em_tree, em);
3706 write_unlock(&em_tree->lock);
3707 free_extent_map(em);
3708 if (ret)
3709 goto error;
3710
3711 ret = btrfs_make_block_group(trans, extent_root, 0, type,
3712 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3713 start, num_bytes);
3714 if (ret)
3715 goto error;
3716
3717 for (i = 0; i < map->num_stripes; ++i) {
3718 struct btrfs_device *device;
3719 u64 dev_offset;
3720
3721 device = map->stripes[i].dev;
3722 dev_offset = map->stripes[i].physical;
3723
3724 ret = btrfs_alloc_dev_extent(trans, device,
3725 info->chunk_root->root_key.objectid,
3726 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3727 start, dev_offset, stripe_size);
3728 if (ret) {
3729 btrfs_abort_transaction(trans, extent_root, ret);
3730 goto error;
3731 }
3732 }
3733
3734 kfree(devices_info);
3735 return 0;
3736
3737 error:
3738 kfree(map);
3739 kfree(devices_info);
3740 return ret;
3741 }
3742
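/*
 * Second phase of chunk allocation: account the stripes against each
 * device, build the on-disk chunk item and insert it into the chunk
 * tree (and into the superblock array for SYSTEM chunks).
 */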
3743 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3744 struct btrfs_root *extent_root,
3745 struct map_lookup *map, u64 chunk_offset,
3746 u64 chunk_size, u64 stripe_size)
3747 {
3748 u64 dev_offset;
3749 struct btrfs_key key;
3750 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3751 struct btrfs_device *device;
3752 struct btrfs_chunk *chunk;
3753 struct btrfs_stripe *stripe;
3754 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3755 int index = 0;
3756 int ret;
3757
3758 chunk = kzalloc(item_size, GFP_NOFS);
3759 if (!chunk)
3760 return -ENOMEM;
3761
3762 index = 0;
3763 while (index < map->num_stripes) {
3764 device = map->stripes[index].dev;
3765 device->bytes_used += stripe_size;
3766 ret = btrfs_update_device(trans, device);
3767 if (ret)
3768 goto out_free;
3769 index++;
3770 }
3771
3772 spin_lock(&extent_root->fs_info->free_chunk_lock);
3773 extent_root->fs_info->free_chunk_space -= (stripe_size *
3774 map->num_stripes);
3775 spin_unlock(&extent_root->fs_info->free_chunk_lock);
3776
3777 index = 0;
3778 stripe = &chunk->stripe;
3779 while (index < map->num_stripes) {
3780 device = map->stripes[index].dev;
3781 dev_offset = map->stripes[index].physical;
3782
3783 btrfs_set_stack_stripe_devid(stripe, device->devid);
3784 btrfs_set_stack_stripe_offset(stripe, dev_offset);
3785 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3786 stripe++;
3787 index++;
3788 }
3789
3790 btrfs_set_stack_chunk_length(chunk, chunk_size);
3791 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3792 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3793 btrfs_set_stack_chunk_type(chunk, map->type);
3794 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3795 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3796 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3797 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3798 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3799
3800 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3801 key.type = BTRFS_CHUNK_ITEM_KEY;
3802 key.offset = chunk_offset;
3803
3804 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3805
3806 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3807 /*
3808 * TODO: Cleanup of inserted chunk root in case of
3809 * failure.
3810 */
3811 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3812 item_size);
3813 }
3814
3815 out_free:
3816 kfree(chunk);
3817 return ret;
3818 }
3819
3820 /*
3821  * Chunk allocation falls into two parts. The first part does the work
3822  * that makes the newly allocated chunk usable, but does not do any
3823  * operation that modifies the chunk tree. The second part does the work
3824  * that requires modifying the chunk tree. This division is important for
3825  * the bootstrap process of adding storage to a seed btrfs.
3826 */
3827 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3828 struct btrfs_root *extent_root, u64 type)
3829 {
3830 u64 chunk_offset;
3831 u64 chunk_size;
3832 u64 stripe_size;
3833 struct map_lookup *map;
3834 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3835 int ret;
3836
3837 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3838 &chunk_offset);
3839 if (ret)
3840 return ret;
3841
3842 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3843 &stripe_size, chunk_offset, type);
3844 if (ret)
3845 return ret;
3846
3847 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3848 chunk_size, stripe_size);
3849 if (ret)
3850 return ret;
3851 return 0;
3852 }
3853
3854 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3855 struct btrfs_root *root,
3856 struct btrfs_device *device)
3857 {
3858 u64 chunk_offset;
3859 u64 sys_chunk_offset;
3860 u64 chunk_size;
3861 u64 sys_chunk_size;
3862 u64 stripe_size;
3863 u64 sys_stripe_size;
3864 u64 alloc_profile;
3865 struct map_lookup *map;
3866 struct map_lookup *sys_map;
3867 struct btrfs_fs_info *fs_info = root->fs_info;
3868 struct btrfs_root *extent_root = fs_info->extent_root;
3869 int ret;
3870
3871 ret = find_next_chunk(fs_info->chunk_root,
3872 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3873 if (ret)
3874 return ret;
3875
3876 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3877 fs_info->avail_metadata_alloc_bits;
3878 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3879
3880 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3881 &stripe_size, chunk_offset, alloc_profile);
3882 if (ret)
3883 return ret;
3884
3885 sys_chunk_offset = chunk_offset + chunk_size;
3886
3887 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3888 fs_info->avail_system_alloc_bits;
3889 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3890
3891 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3892 &sys_chunk_size, &sys_stripe_size,
3893 sys_chunk_offset, alloc_profile);
3894 if (ret) {
3895 btrfs_abort_transaction(trans, root, ret);
3896 goto out;
3897 }
3898
3899 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3900 if (ret) {
3901 btrfs_abort_transaction(trans, root, ret);
3902 goto out;
3903 }
3904
3905 /*
3906 	 * Modifying the chunk tree requires allocating new blocks from both
3907 	 * the system block group and the metadata block group, so we can
3908 	 * only perform operations that modify the chunk tree after both
3909 	 * block groups have been created.
3910 */
3911 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3912 chunk_size, stripe_size);
3913 if (ret) {
3914 btrfs_abort_transaction(trans, root, ret);
3915 goto out;
3916 }
3917
3918 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3919 sys_chunk_offset, sys_chunk_size,
3920 sys_stripe_size);
3921 if (ret)
3922 btrfs_abort_transaction(trans, root, ret);
3923
3924 out:
3925
3926 return ret;
3927 }
3928
3929 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3930 {
3931 struct extent_map *em;
3932 struct map_lookup *map;
3933 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3934 int readonly = 0;
3935 int i;
3936
3937 read_lock(&map_tree->map_tree.lock);
3938 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3939 read_unlock(&map_tree->map_tree.lock);
3940 if (!em)
3941 return 1;
3942
3943 if (btrfs_test_opt(root, DEGRADED)) {
3944 free_extent_map(em);
3945 return 0;
3946 }
3947
3948 map = (struct map_lookup *)em->bdev;
3949 for (i = 0; i < map->num_stripes; i++) {
3950 if (!map->stripes[i].dev->writeable) {
3951 readonly = 1;
3952 break;
3953 }
3954 }
3955 free_extent_map(em);
3956 return readonly;
3957 }
3958
3959 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3960 {
3961 extent_map_tree_init(&tree->map_tree);
3962 }
3963
3964 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3965 {
3966 struct extent_map *em;
3967
3968 while (1) {
3969 write_lock(&tree->map_tree.lock);
3970 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3971 if (em)
3972 remove_extent_mapping(&tree->map_tree, em);
3973 write_unlock(&tree->map_tree.lock);
3974 if (!em)
3975 break;
3976 kfree(em->bdev);
3977 /* once for us */
3978 free_extent_map(em);
3979 /* once for the tree */
3980 free_extent_map(em);
3981 }
3982 }
3983
3984 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
3985 {
3986 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
3987 struct extent_map *em;
3988 struct map_lookup *map;
3989 struct extent_map_tree *em_tree = &map_tree->map_tree;
3990 int ret;
3991
3992 read_lock(&em_tree->lock);
3993 em = lookup_extent_mapping(em_tree, logical, len);
3994 read_unlock(&em_tree->lock);
3995 BUG_ON(!em);
3996
3997 BUG_ON(em->start > logical || em->start + em->len < logical);
3998 map = (struct map_lookup *)em->bdev;
3999 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4000 ret = map->num_stripes;
4001 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4002 ret = map->sub_stripes;
4003 else
4004 ret = 1;
4005 free_extent_map(em);
4006
4007 btrfs_dev_replace_lock(&fs_info->dev_replace);
4008 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4009 ret++;
4010 btrfs_dev_replace_unlock(&fs_info->dev_replace);
4011
4012 return ret;
4013 }
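
/*
 * Illustrative user-space sketch of the copy-count rule implemented by
 * btrfs_num_copies() above: RAID1 and DUP expose one copy per stripe,
 * RAID10 exposes sub_stripes copies, everything else a single copy, and an
 * ongoing device replace adds one extra readable copy.  The X_* flags and
 * the values in main() are invented for the example; they are not the real
 * BTRFS_BLOCK_GROUP_* constants.
 */
#include <stdio.h>

#define X_RAID1  (1u << 0)
#define X_DUP    (1u << 1)
#define X_RAID10 (1u << 2)

static int demo_num_copies(unsigned int flags, int num_stripes,
			   int sub_stripes, int replace_running)
{
	int ret;

	if (flags & (X_RAID1 | X_DUP))
		ret = num_stripes;
	else if (flags & X_RAID10)
		ret = sub_stripes;
	else
		ret = 1;

	/* mirrors btrfs_num_copies() bumping the count during dev-replace */
	return replace_running ? ret + 1 : ret;
}

int main(void)
{
	/* RAID10 with 4 stripes and 2 sub-stripes -> 2 readable copies */
	printf("%d\n", demo_num_copies(X_RAID10, 4, 2, 0));
	return 0;
}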
4014
4015 static int find_live_mirror(struct btrfs_fs_info *fs_info,
4016 struct map_lookup *map, int first, int num,
4017 int optimal, int dev_replace_is_ongoing)
4018 {
4019 int i;
4020 int tolerance;
4021 struct btrfs_device *srcdev;
4022
4023 if (dev_replace_is_ongoing &&
4024 fs_info->dev_replace.cont_reading_from_srcdev_mode ==
4025 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
4026 srcdev = fs_info->dev_replace.srcdev;
4027 else
4028 srcdev = NULL;
4029
4030 /*
4031 * try to avoid the drive that is the source drive for a
4032 * dev-replace procedure, only choose it if no other non-missing
4033 * mirror is available
4034 */
4035 for (tolerance = 0; tolerance < 2; tolerance++) {
4036 if (map->stripes[optimal].dev->bdev &&
4037 (tolerance || map->stripes[optimal].dev != srcdev))
4038 return optimal;
4039 for (i = first; i < first + num; i++) {
4040 if (map->stripes[i].dev->bdev &&
4041 (tolerance || map->stripes[i].dev != srcdev))
4042 return i;
4043 }
4044 }
4045
4046 /* we couldn't find one that doesn't fail. Just return something
4047 * and the io error handling code will clean up eventually
4048 */
4049 return optimal;
4050 }
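
/*
 * Stand-alone sketch of the two-pass mirror selection used by
 * find_live_mirror() above: the first pass refuses missing devices and the
 * dev-replace source device, the second pass only refuses missing devices.
 * struct demo_stripe is invented for the example and is not the kernel's
 * btrfs_bio_stripe.
 */
struct demo_stripe {
	int present;	/* device has a usable bdev */
	int is_srcdev;	/* device is the dev-replace source */
};

static int demo_pick_live_mirror(const struct demo_stripe *s, int first,
				 int num, int optimal)
{
	int tolerance, i;

	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (s[optimal].present &&
		    (tolerance || !s[optimal].is_srcdev))
			return optimal;
		for (i = first; i < first + num; i++)
			if (s[i].present && (tolerance || !s[i].is_srcdev))
				return i;
	}
	/* nothing better found; let the I/O error path sort it out */
	return optimal;
}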
4051
4052 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4053 u64 logical, u64 *length,
4054 struct btrfs_bio **bbio_ret,
4055 int mirror_num)
4056 {
4057 struct extent_map *em;
4058 struct map_lookup *map;
4059 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4060 struct extent_map_tree *em_tree = &map_tree->map_tree;
4061 u64 offset;
4062 u64 stripe_offset;
4063 u64 stripe_end_offset;
4064 u64 stripe_nr;
4065 u64 stripe_nr_orig;
4066 u64 stripe_nr_end;
4067 int stripe_index;
4068 int i;
4069 int ret = 0;
4070 int num_stripes;
4071 int max_errors = 0;
4072 struct btrfs_bio *bbio = NULL;
4073 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
4074 int dev_replace_is_ongoing = 0;
4075 int num_alloc_stripes;
4076 int patch_the_first_stripe_for_dev_replace = 0;
4077 u64 physical_to_patch_in_first_stripe = 0;
4078
4079 read_lock(&em_tree->lock);
4080 em = lookup_extent_mapping(em_tree, logical, *length);
4081 read_unlock(&em_tree->lock);
4082
4083 if (!em) {
4084 printk(KERN_CRIT "btrfs: unable to find logical %llu len %llu\n",
4085 (unsigned long long)logical,
4086 (unsigned long long)*length);
4087 BUG();
4088 }
4089
4090 BUG_ON(em->start > logical || em->start + em->len < logical);
4091 map = (struct map_lookup *)em->bdev;
4092 offset = logical - em->start;
4093
4094 stripe_nr = offset;
4095 /*
4096 * stripe_nr counts the total number of stripes we have to stride
4097 * to get to this block
4098 */
4099 do_div(stripe_nr, map->stripe_len);
4100
4101 stripe_offset = stripe_nr * map->stripe_len;
4102 BUG_ON(offset < stripe_offset);
4103
4104 /* stripe_offset is the offset of this block in its stripe */
4105 stripe_offset = offset - stripe_offset;
4106
4107 if (rw & REQ_DISCARD)
4108 *length = min_t(u64, em->len - offset, *length);
4109 else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
4110 /* we limit the length of each bio to what fits in a stripe */
4111 *length = min_t(u64, em->len - offset,
4112 map->stripe_len - stripe_offset);
4113 } else {
4114 *length = em->len - offset;
4115 }
4116
4117 if (!bbio_ret)
4118 goto out;
4119
4120 btrfs_dev_replace_lock(dev_replace);
4121 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
4122 if (!dev_replace_is_ongoing)
4123 btrfs_dev_replace_unlock(dev_replace);
4124
4125 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
4126 !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
4127 dev_replace->tgtdev != NULL) {
4128 /*
4129 * In the dev-replace case, for the repair case (the only case
4130 * where the mirror is selected explicitly when calling
4131 * btrfs_map_block), blocks left of the left cursor can also be
4132 * read from the target drive.
4133 * For REQ_GET_READ_MIRRORS, the target drive is added as
4134 * the last one to the array of stripes. For READ, it also
4135 * needs to be supported using the same mirror number.
4136 * If the requested block is not left of the left cursor,
4137 * EIO is returned. This can happen because btrfs_num_copies()
4138 * returns one more in the dev-replace case.
4139 */
4140 u64 tmp_length = *length;
4141 struct btrfs_bio *tmp_bbio = NULL;
4142 int tmp_num_stripes;
4143 u64 srcdev_devid = dev_replace->srcdev->devid;
4144 int index_srcdev = 0;
4145 int found = 0;
4146 u64 physical_of_found = 0;
4147
4148 ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
4149 logical, &tmp_length, &tmp_bbio, 0);
4150 if (ret) {
4151 WARN_ON(tmp_bbio != NULL);
4152 goto out;
4153 }
4154
4155 tmp_num_stripes = tmp_bbio->num_stripes;
4156 if (mirror_num > tmp_num_stripes) {
4157 /*
4158 * REQ_GET_READ_MIRRORS does not contain this
4159 * mirror, that means that the requested area
4160 * is not left of the left cursor
4161 */
4162 ret = -EIO;
4163 kfree(tmp_bbio);
4164 goto out;
4165 }
4166
4167 /*
4168 * process the rest of the function using the mirror_num
4169 * of the source drive. Therefore look it up first.
4170 * At the end, patch the device pointer to that of the
4171 * target drive.
4172 */
4173 for (i = 0; i < tmp_num_stripes; i++) {
4174 if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
4175 /*
4176 * In case of DUP, in order to keep it
4177 * simple, only add the mirror with the
4178 * lowest physical address
4179 */
4180 if (found &&
4181 physical_of_found <=
4182 tmp_bbio->stripes[i].physical)
4183 continue;
4184 index_srcdev = i;
4185 found = 1;
4186 physical_of_found =
4187 tmp_bbio->stripes[i].physical;
4188 }
4189 }
4190
4191 if (found) {
4192 mirror_num = index_srcdev + 1;
4193 patch_the_first_stripe_for_dev_replace = 1;
4194 physical_to_patch_in_first_stripe = physical_of_found;
4195 } else {
4196 WARN_ON(1);
4197 ret = -EIO;
4198 kfree(tmp_bbio);
4199 goto out;
4200 }
4201
4202 kfree(tmp_bbio);
4203 } else if (mirror_num > map->num_stripes) {
4204 mirror_num = 0;
4205 }
4206
4207 num_stripes = 1;
4208 stripe_index = 0;
4209 stripe_nr_orig = stripe_nr;
4210 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
4211 (~(map->stripe_len - 1));
4212 do_div(stripe_nr_end, map->stripe_len);
4213 stripe_end_offset = stripe_nr_end * map->stripe_len -
4214 (offset + *length);
4215 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4216 if (rw & REQ_DISCARD)
4217 num_stripes = min_t(u64, map->num_stripes,
4218 stripe_nr_end - stripe_nr_orig);
4219 stripe_index = do_div(stripe_nr, map->num_stripes);
4220 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
4221 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
4222 num_stripes = map->num_stripes;
4223 else if (mirror_num)
4224 stripe_index = mirror_num - 1;
4225 else {
4226 stripe_index = find_live_mirror(fs_info, map, 0,
4227 map->num_stripes,
4228 current->pid % map->num_stripes,
4229 dev_replace_is_ongoing);
4230 mirror_num = stripe_index + 1;
4231 }
4232
4233 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
4234 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
4235 num_stripes = map->num_stripes;
4236 } else if (mirror_num) {
4237 stripe_index = mirror_num - 1;
4238 } else {
4239 mirror_num = 1;
4240 }
4241
4242 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4243 int factor = map->num_stripes / map->sub_stripes;
4244
4245 stripe_index = do_div(stripe_nr, factor);
4246 stripe_index *= map->sub_stripes;
4247
4248 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
4249 num_stripes = map->sub_stripes;
4250 else if (rw & REQ_DISCARD)
4251 num_stripes = min_t(u64, map->sub_stripes *
4252 (stripe_nr_end - stripe_nr_orig),
4253 map->num_stripes);
4254 else if (mirror_num)
4255 stripe_index += mirror_num - 1;
4256 else {
4257 int old_stripe_index = stripe_index;
4258 stripe_index = find_live_mirror(fs_info, map,
4259 stripe_index,
4260 map->sub_stripes, stripe_index +
4261 current->pid % map->sub_stripes,
4262 dev_replace_is_ongoing);
4263 mirror_num = stripe_index - old_stripe_index + 1;
4264 }
4265 } else {
4266 /*
4267 * after this do_div call, stripe_nr is the number of stripes
4268 * on this device we have to walk to find the data, and
4269 * stripe_index is the number of our device in the stripe array
4270 */
4271 stripe_index = do_div(stripe_nr, map->num_stripes);
4272 mirror_num = stripe_index + 1;
4273 }
4274 BUG_ON(stripe_index >= map->num_stripes);
4275
4276 num_alloc_stripes = num_stripes;
4277 if (dev_replace_is_ongoing) {
4278 if (rw & (REQ_WRITE | REQ_DISCARD))
4279 num_alloc_stripes <<= 1;
4280 if (rw & REQ_GET_READ_MIRRORS)
4281 num_alloc_stripes++;
4282 }
4283 bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
4284 if (!bbio) {
4285 ret = -ENOMEM;
4286 goto out;
4287 }
4288 atomic_set(&bbio->error, 0);
4289
4290 if (rw & REQ_DISCARD) {
4291 int factor = 0;
4292 int sub_stripes = 0;
4293 u64 stripes_per_dev = 0;
4294 u32 remaining_stripes = 0;
4295 u32 last_stripe = 0;
4296
4297 if (map->type &
4298 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
4299 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4300 sub_stripes = 1;
4301 else
4302 sub_stripes = map->sub_stripes;
4303
4304 factor = map->num_stripes / sub_stripes;
4305 stripes_per_dev = div_u64_rem(stripe_nr_end -
4306 stripe_nr_orig,
4307 factor,
4308 &remaining_stripes);
4309 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
4310 last_stripe *= sub_stripes;
4311 }
4312
4313 for (i = 0; i < num_stripes; i++) {
4314 bbio->stripes[i].physical =
4315 map->stripes[stripe_index].physical +
4316 stripe_offset + stripe_nr * map->stripe_len;
4317 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
4318
4319 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
4320 BTRFS_BLOCK_GROUP_RAID10)) {
4321 bbio->stripes[i].length = stripes_per_dev *
4322 map->stripe_len;
4323
4324 if (i / sub_stripes < remaining_stripes)
4325 bbio->stripes[i].length +=
4326 map->stripe_len;
4327
4328 /*
4329 * Special for the first stripe and
4330 * the last stripe:
4331 *
4332 * |-------|...|-------|
4333 * |----------|
4334 * off end_off
4335 */
4336 if (i < sub_stripes)
4337 bbio->stripes[i].length -=
4338 stripe_offset;
4339
4340 if (stripe_index >= last_stripe &&
4341 stripe_index <= (last_stripe +
4342 sub_stripes - 1))
4343 bbio->stripes[i].length -=
4344 stripe_end_offset;
4345
4346 if (i == sub_stripes - 1)
4347 stripe_offset = 0;
4348 } else
4349 bbio->stripes[i].length = *length;
4350
4351 stripe_index++;
4352 if (stripe_index == map->num_stripes) {
4353 /* This could only happen for RAID0/10 */
4354 stripe_index = 0;
4355 stripe_nr++;
4356 }
4357 }
4358 } else {
4359 for (i = 0; i < num_stripes; i++) {
4360 bbio->stripes[i].physical =
4361 map->stripes[stripe_index].physical +
4362 stripe_offset +
4363 stripe_nr * map->stripe_len;
4364 bbio->stripes[i].dev =
4365 map->stripes[stripe_index].dev;
4366 stripe_index++;
4367 }
4368 }
4369
4370 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
4371 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4372 BTRFS_BLOCK_GROUP_RAID10 |
4373 BTRFS_BLOCK_GROUP_DUP)) {
4374 max_errors = 1;
4375 }
4376 }
4377
4378 if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
4379 dev_replace->tgtdev != NULL) {
4380 int index_where_to_add;
4381 u64 srcdev_devid = dev_replace->srcdev->devid;
4382
4383 /*
4384 * duplicate the write operations while the dev replace
4385 * procedure is running. Since the copying of the old disk
4386 * to the new disk takes place at run time while the
4387 * filesystem is mounted writable, the regular write
4388 * operations to the old disk have to be duplicated to go
4389 * to the new disk as well.
4390 * Note that device->missing is handled by the caller, and
4391 * that the write to the old disk is already set up in the
4392 * stripes array.
4393 */
4394 index_where_to_add = num_stripes;
4395 for (i = 0; i < num_stripes; i++) {
4396 if (bbio->stripes[i].dev->devid == srcdev_devid) {
4397 /* write to new disk, too */
4398 struct btrfs_bio_stripe *new =
4399 bbio->stripes + index_where_to_add;
4400 struct btrfs_bio_stripe *old =
4401 bbio->stripes + i;
4402
4403 new->physical = old->physical;
4404 new->length = old->length;
4405 new->dev = dev_replace->tgtdev;
4406 index_where_to_add++;
4407 max_errors++;
4408 }
4409 }
4410 num_stripes = index_where_to_add;
4411 } else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
4412 dev_replace->tgtdev != NULL) {
4413 u64 srcdev_devid = dev_replace->srcdev->devid;
4414 int index_srcdev = 0;
4415 int found = 0;
4416 u64 physical_of_found = 0;
4417
4418 /*
4419 * During the dev-replace procedure, the target drive can
4420 * also be used to read data in case it is needed to repair
4421 * a corrupt block elsewhere. This is possible if the
4422 * requested area is left of the left cursor. In this area,
4423 * the target drive is a full copy of the source drive.
4424 */
4425 for (i = 0; i < num_stripes; i++) {
4426 if (bbio->stripes[i].dev->devid == srcdev_devid) {
4427 /*
4428 * In case of DUP, in order to keep it
4429 * simple, only add the mirror with the
4430 * lowest physical address
4431 */
4432 if (found &&
4433 physical_of_found <=
4434 bbio->stripes[i].physical)
4435 continue;
4436 index_srcdev = i;
4437 found = 1;
4438 physical_of_found = bbio->stripes[i].physical;
4439 }
4440 }
4441 if (found) {
4442 u64 length = map->stripe_len;
4443
4444 if (physical_of_found + length <=
4445 dev_replace->cursor_left) {
4446 struct btrfs_bio_stripe *tgtdev_stripe =
4447 bbio->stripes + num_stripes;
4448
4449 tgtdev_stripe->physical = physical_of_found;
4450 tgtdev_stripe->length =
4451 bbio->stripes[index_srcdev].length;
4452 tgtdev_stripe->dev = dev_replace->tgtdev;
4453
4454 num_stripes++;
4455 }
4456 }
4457 }
4458
4459 *bbio_ret = bbio;
4460 bbio->num_stripes = num_stripes;
4461 bbio->max_errors = max_errors;
4462 bbio->mirror_num = mirror_num;
4463
4464 /*
4465 * this is the case that REQ_READ && dev_replace_is_ongoing &&
4466 * mirror_num == num_stripes + 1 && dev_replace target drive is
4467 * available as a mirror
4468 */
4469 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
4470 WARN_ON(num_stripes > 1);
4471 bbio->stripes[0].dev = dev_replace->tgtdev;
4472 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
4473 bbio->mirror_num = map->num_stripes + 1;
4474 }
4475 out:
4476 if (dev_replace_is_ongoing)
4477 btrfs_dev_replace_unlock(dev_replace);
4478 free_extent_map(em);
4479 return ret;
4480 }
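
/*
 * Minimal user-space model of the address arithmetic performed by
 * __btrfs_map_block() above for the simple profiles (single/RAID0-like and
 * RAID10-like).  struct demo_map and the values in main() are invented for
 * the example; the kernel uses do_div() because these are 64-bit divisions
 * that must also work on 32-bit hosts.
 */
#include <stdio.h>
#include <stdint.h>

struct demo_map {
	uint64_t stripe_len;		/* bytes per stripe */
	int num_stripes;
	int sub_stripes;		/* > 1 only for the RAID10-like case */
	uint64_t stripe_physical[8];	/* device offset of each stripe */
};

/* Return the physical byte address backing "offset" within the chunk. */
static uint64_t demo_map_block(const struct demo_map *m, uint64_t offset)
{
	uint64_t stripe_nr = offset / m->stripe_len;
	uint64_t stripe_offset = offset - stripe_nr * m->stripe_len;
	int stripe_index;

	if (m->sub_stripes > 1) {		/* RAID10-like */
		int factor = m->num_stripes / m->sub_stripes;

		stripe_index = (stripe_nr % factor) * m->sub_stripes;
		stripe_nr /= factor;
	} else {				/* single/RAID0-like */
		stripe_index = stripe_nr % m->num_stripes;
		stripe_nr /= m->num_stripes;
	}

	return m->stripe_physical[stripe_index] +
	       stripe_nr * m->stripe_len + stripe_offset;
}

int main(void)
{
	struct demo_map m = {
		.stripe_len = 65536, .num_stripes = 2, .sub_stripes = 1,
		.stripe_physical = { 1048576, 2097152 },
	};

	/* offset 200000 is 3 full stripes in: device index 1, 2nd stripe */
	printf("%llu\n", (unsigned long long)demo_map_block(&m, 200000));
	return 0;
}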
4481
4482 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4483 u64 logical, u64 *length,
4484 struct btrfs_bio **bbio_ret, int mirror_num)
4485 {
4486 return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
4487 mirror_num);
4488 }
4489
4490 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
4491 u64 chunk_start, u64 physical, u64 devid,
4492 u64 **logical, int *naddrs, int *stripe_len)
4493 {
4494 struct extent_map_tree *em_tree = &map_tree->map_tree;
4495 struct extent_map *em;
4496 struct map_lookup *map;
4497 u64 *buf;
4498 u64 bytenr;
4499 u64 length;
4500 u64 stripe_nr;
4501 int i, j, nr = 0;
4502
4503 read_lock(&em_tree->lock);
4504 em = lookup_extent_mapping(em_tree, chunk_start, 1);
4505 read_unlock(&em_tree->lock);
4506
4507 BUG_ON(!em || em->start != chunk_start);
4508 map = (struct map_lookup *)em->bdev;
4509
4510 length = em->len;
4511 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4512 do_div(length, map->num_stripes / map->sub_stripes);
4513 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4514 do_div(length, map->num_stripes);
4515
4516 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
4517 BUG_ON(!buf); /* -ENOMEM */
4518
4519 for (i = 0; i < map->num_stripes; i++) {
4520 if (devid && map->stripes[i].dev->devid != devid)
4521 continue;
4522 if (map->stripes[i].physical > physical ||
4523 map->stripes[i].physical + length <= physical)
4524 continue;
4525
4526 stripe_nr = physical - map->stripes[i].physical;
4527 do_div(stripe_nr, map->stripe_len);
4528
4529 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4530 stripe_nr = stripe_nr * map->num_stripes + i;
4531 do_div(stripe_nr, map->sub_stripes);
4532 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4533 stripe_nr = stripe_nr * map->num_stripes + i;
4534 }
4535 bytenr = chunk_start + stripe_nr * map->stripe_len;
4536 WARN_ON(nr >= map->num_stripes);
4537 for (j = 0; j < nr; j++) {
4538 if (buf[j] == bytenr)
4539 break;
4540 }
4541 if (j == nr) {
4542 WARN_ON(nr >= map->num_stripes);
4543 buf[nr++] = bytenr;
4544 }
4545 }
4546
4547 *logical = buf;
4548 *naddrs = nr;
4549 *stripe_len = map->stripe_len;
4550
4551 free_extent_map(em);
4552 return 0;
4553 }
4554
4555 static void *merge_stripe_index_into_bio_private(void *bi_private,
4556 unsigned int stripe_index)
4557 {
4558 /*
4559 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
4560 * at most 1.
4561 * The alternative solution (instead of stealing bits from the
4562 * pointer) would be to allocate an intermediate structure
4563 * that contains the old private pointer plus the stripe_index.
4564 */
4565 BUG_ON((((uintptr_t)bi_private) & 3) != 0);
4566 BUG_ON(stripe_index > 3);
4567 return (void *)(((uintptr_t)bi_private) | stripe_index);
4568 }
4569
4570 static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
4571 {
4572 return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
4573 }
4574
4575 static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
4576 {
4577 return (unsigned int)((uintptr_t)bi_private) & 3;
4578 }
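
/*
 * Minimal user-space demonstration of the bit-stealing trick used by the
 * three helpers above: a kmalloc'ed (here malloc'ed) pointer is at least
 * 4-byte aligned, so its two low bits are free to carry a small stripe
 * index alongside the btrfs_bio pointer.
 */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

static void *demo_pack_index(void *p, unsigned int idx)
{
	assert(((uintptr_t)p & 3) == 0 && idx <= 3);
	return (void *)((uintptr_t)p | idx);
}

static void *demo_unpack_ptr(void *p)
{
	return (void *)((uintptr_t)p & ~(uintptr_t)3);
}

static unsigned int demo_unpack_index(void *p)
{
	return (unsigned int)((uintptr_t)p & 3);
}

int main(void)
{
	int *obj = malloc(sizeof(*obj));
	void *packed = demo_pack_index(obj, 2);

	assert(demo_unpack_ptr(packed) == obj);
	assert(demo_unpack_index(packed) == 2);
	free(obj);
	return 0;
}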
4579
4580 static void btrfs_end_bio(struct bio *bio, int err)
4581 {
4582 struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
4583 int is_orig_bio = 0;
4584
4585 if (err) {
4586 atomic_inc(&bbio->error);
4587 if (err == -EIO || err == -EREMOTEIO) {
4588 unsigned int stripe_index =
4589 extract_stripe_index_from_bio_private(
4590 bio->bi_private);
4591 struct btrfs_device *dev;
4592
4593 BUG_ON(stripe_index >= bbio->num_stripes);
4594 dev = bbio->stripes[stripe_index].dev;
4595 if (dev->bdev) {
4596 if (bio->bi_rw & WRITE)
4597 btrfs_dev_stat_inc(dev,
4598 BTRFS_DEV_STAT_WRITE_ERRS);
4599 else
4600 btrfs_dev_stat_inc(dev,
4601 BTRFS_DEV_STAT_READ_ERRS);
4602 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
4603 btrfs_dev_stat_inc(dev,
4604 BTRFS_DEV_STAT_FLUSH_ERRS);
4605 btrfs_dev_stat_print_on_error(dev);
4606 }
4607 }
4608 }
4609
4610 if (bio == bbio->orig_bio)
4611 is_orig_bio = 1;
4612
4613 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4614 if (!is_orig_bio) {
4615 bio_put(bio);
4616 bio = bbio->orig_bio;
4617 }
4618 bio->bi_private = bbio->private;
4619 bio->bi_end_io = bbio->end_io;
4620 bio->bi_bdev = (struct block_device *)
4621 (unsigned long)bbio->mirror_num;
4622 /* only send an error to the higher layers if it is
4623 * beyond the tolerance of the multi-bio
4624 */
4625 if (atomic_read(&bbio->error) > bbio->max_errors) {
4626 err = -EIO;
4627 } else {
4628 /*
4629 * this bio is actually up to date, we didn't
4630 * go over the max number of errors
4631 */
4632 set_bit(BIO_UPTODATE, &bio->bi_flags);
4633 err = 0;
4634 }
4635 kfree(bbio);
4636
4637 bio_endio(bio, err);
4638 } else if (!is_orig_bio) {
4639 bio_put(bio);
4640 }
4641 }
4642
4643 struct async_sched {
4644 struct bio *bio;
4645 int rw;
4646 struct btrfs_fs_info *info;
4647 struct btrfs_work work;
4648 };
4649
4650 /*
4651 * see run_scheduled_bios for a description of why bios are collected for
4652 * async submit.
4653 *
4654 * This will add one bio to the pending list for a device and make sure
4655 * the work struct is scheduled.
4656 */
4657 static noinline void schedule_bio(struct btrfs_root *root,
4658 struct btrfs_device *device,
4659 int rw, struct bio *bio)
4660 {
4661 int should_queue = 1;
4662 struct btrfs_pending_bios *pending_bios;
4663
4664 /* don't bother with additional async steps for reads, right now */
4665 if (!(rw & REQ_WRITE)) {
4666 bio_get(bio);
4667 btrfsic_submit_bio(rw, bio);
4668 bio_put(bio);
4669 return;
4670 }
4671
4672 /*
4673 * nr_async_bios allows us to reliably return congestion to the
4674 * higher layers. Otherwise, the async bio makes it appear we have
4675 * made progress against dirty pages when we've really just put it
4676 * on a queue for later
4677 */
4678 atomic_inc(&root->fs_info->nr_async_bios);
4679 WARN_ON(bio->bi_next);
4680 bio->bi_next = NULL;
4681 bio->bi_rw |= rw;
4682
4683 spin_lock(&device->io_lock);
4684 if (bio->bi_rw & REQ_SYNC)
4685 pending_bios = &device->pending_sync_bios;
4686 else
4687 pending_bios = &device->pending_bios;
4688
4689 if (pending_bios->tail)
4690 pending_bios->tail->bi_next = bio;
4691
4692 pending_bios->tail = bio;
4693 if (!pending_bios->head)
4694 pending_bios->head = bio;
4695 if (device->running_pending)
4696 should_queue = 0;
4697
4698 spin_unlock(&device->io_lock);
4699
4700 if (should_queue)
4701 btrfs_queue_worker(&root->fs_info->submit_workers,
4702 &device->work);
4703 }
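
/*
 * Sketch of the head/tail pending list manipulated by schedule_bio()
 * above, using an invented user-space node instead of struct bio: new work
 * is appended at the tail, and the worker later consumes from the head.
 */
#include <stddef.h>

struct demo_bio {
	struct demo_bio *next;
};

struct demo_pending {
	struct demo_bio *head;
	struct demo_bio *tail;
};

static void demo_pending_append(struct demo_pending *p, struct demo_bio *b)
{
	b->next = NULL;
	if (p->tail)
		p->tail->next = b;
	p->tail = b;
	if (!p->head)
		p->head = b;
}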
4704
4705 static int bio_size_ok(struct block_device *bdev, struct bio *bio,
4706 sector_t sector)
4707 {
4708 struct bio_vec *prev;
4709 struct request_queue *q = bdev_get_queue(bdev);
4710 unsigned short max_sectors = queue_max_sectors(q);
4711 struct bvec_merge_data bvm = {
4712 .bi_bdev = bdev,
4713 .bi_sector = sector,
4714 .bi_rw = bio->bi_rw,
4715 };
4716
4717 if (bio->bi_vcnt == 0) {
4718 WARN_ON(1);
4719 return 1;
4720 }
4721
4722 prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
4723 if ((bio->bi_size >> 9) > max_sectors)
4724 return 0;
4725
4726 if (!q->merge_bvec_fn)
4727 return 1;
4728
4729 bvm.bi_size = bio->bi_size - prev->bv_len;
4730 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
4731 return 0;
4732 return 1;
4733 }
4734
4735 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
4736 struct bio *bio, u64 physical, int dev_nr,
4737 int rw, int async)
4738 {
4739 struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
4740
4741 bio->bi_private = bbio;
4742 bio->bi_private = merge_stripe_index_into_bio_private(
4743 bio->bi_private, (unsigned int)dev_nr);
4744 bio->bi_end_io = btrfs_end_bio;
4745 bio->bi_sector = physical >> 9;
4746 #ifdef DEBUG
4747 {
4748 struct rcu_string *name;
4749
4750 rcu_read_lock();
4751 name = rcu_dereference(dev->name);
4752 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
4753 "(%s id %llu), size=%u\n", rw,
4754 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4755 name->str, dev->devid, bio->bi_size);
4756 rcu_read_unlock();
4757 }
4758 #endif
4759 bio->bi_bdev = dev->bdev;
4760 if (async)
4761 schedule_bio(root, dev, rw, bio);
4762 else
4763 btrfsic_submit_bio(rw, bio);
4764 }
4765
4766 static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
4767 struct bio *first_bio, struct btrfs_device *dev,
4768 int dev_nr, int rw, int async)
4769 {
4770 struct bio_vec *bvec = first_bio->bi_io_vec;
4771 struct bio *bio;
4772 int nr_vecs = bio_get_nr_vecs(dev->bdev);
4773 u64 physical = bbio->stripes[dev_nr].physical;
4774
4775 again:
4776 bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
4777 if (!bio)
4778 return -ENOMEM;
4779
4780 while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
4781 if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
4782 bvec->bv_offset) < bvec->bv_len) {
4783 u64 len = bio->bi_size;
4784
4785 atomic_inc(&bbio->stripes_pending);
4786 submit_stripe_bio(root, bbio, bio, physical, dev_nr,
4787 rw, async);
4788 physical += len;
4789 goto again;
4790 }
4791 bvec++;
4792 }
4793
4794 submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
4795 return 0;
4796 }
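
/*
 * User-space analogue of the split performed by breakup_stripe_bio()
 * above: when a request is larger than the device is willing to take in
 * one piece, submit what fits, advance the physical offset, and start
 * again.  The byte budget stands in for the queue limits and
 * merge_bvec_fn checks done by bio_size_ok(); all names are invented.
 */
#include <stdio.h>

static void demo_submit_range(unsigned long long physical,
			      unsigned long long len)
{
	printf("submit %llu bytes at %llu\n", len, physical);
}

static void demo_breakup_io(unsigned long long physical,
			    unsigned long long total,
			    unsigned long long max_per_bio)
{
	while (total) {
		unsigned long long len =
			total < max_per_bio ? total : max_per_bio;

		demo_submit_range(physical, len);
		physical += len;
		total -= len;
	}
}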
4797
4798 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
4799 {
4800 atomic_inc(&bbio->error);
4801 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4802 bio->bi_private = bbio->private;
4803 bio->bi_end_io = bbio->end_io;
4804 bio->bi_bdev = (struct block_device *)
4805 (unsigned long)bbio->mirror_num;
4806 bio->bi_sector = logical >> 9;
4807 kfree(bbio);
4808 bio_endio(bio, -EIO);
4809 }
4810 }
4811
4812 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4813 int mirror_num, int async_submit)
4814 {
4815 struct btrfs_device *dev;
4816 struct bio *first_bio = bio;
4817 u64 logical = (u64)bio->bi_sector << 9;
4818 u64 length = 0;
4819 u64 map_length;
4820 int ret;
4821 int dev_nr = 0;
4822 int total_devs = 1;
4823 struct btrfs_bio *bbio = NULL;
4824
4825 length = bio->bi_size;
4826 map_length = length;
4827
4828 ret = btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
4829 mirror_num);
4830 if (ret)
4831 return ret;
4832
4833 total_devs = bbio->num_stripes;
4834 if (map_length < length) {
4835 printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len %llu "
4836 "len %llu\n", (unsigned long long)logical,
4837 (unsigned long long)length,
4838 (unsigned long long)map_length);
4839 BUG();
4840 }
4841
4842 bbio->orig_bio = first_bio;
4843 bbio->private = first_bio->bi_private;
4844 bbio->end_io = first_bio->bi_end_io;
4845 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
4846
4847 while (dev_nr < total_devs) {
4848 dev = bbio->stripes[dev_nr].dev;
4849 if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
4850 bbio_error(bbio, first_bio, logical);
4851 dev_nr++;
4852 continue;
4853 }
4854
4855 /*
4856 * Check and see if we're ok with this bio based on its size
4857 * and offset with the given device.
4858 */
4859 if (!bio_size_ok(dev->bdev, first_bio,
4860 bbio->stripes[dev_nr].physical >> 9)) {
4861 ret = breakup_stripe_bio(root, bbio, first_bio, dev,
4862 dev_nr, rw, async_submit);
4863 BUG_ON(ret);
4864 dev_nr++;
4865 continue;
4866 }
4867
4868 if (dev_nr < total_devs - 1) {
4869 bio = bio_clone(first_bio, GFP_NOFS);
4870 BUG_ON(!bio); /* -ENOMEM */
4871 } else {
4872 bio = first_bio;
4873 }
4874
4875 submit_stripe_bio(root, bbio, bio,
4876 bbio->stripes[dev_nr].physical, dev_nr, rw,
4877 async_submit);
4878 dev_nr++;
4879 }
4880 return 0;
4881 }
4882
4883 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
4884 u8 *uuid, u8 *fsid)
4885 {
4886 struct btrfs_device *device;
4887 struct btrfs_fs_devices *cur_devices;
4888
4889 cur_devices = fs_info->fs_devices;
4890 while (cur_devices) {
4891 if (!fsid ||
4892 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4893 device = __find_device(&cur_devices->devices,
4894 devid, uuid);
4895 if (device)
4896 return device;
4897 }
4898 cur_devices = cur_devices->seed;
4899 }
4900 return NULL;
4901 }
4902
4903 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
4904 u64 devid, u8 *dev_uuid)
4905 {
4906 struct btrfs_device *device;
4907 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4908
4909 device = kzalloc(sizeof(*device), GFP_NOFS);
4910 if (!device)
4911 return NULL;
4912 list_add(&device->dev_list,
4913 &fs_devices->devices);
4914 device->dev_root = root->fs_info->dev_root;
4915 device->devid = devid;
4916 device->work.func = pending_bios_fn;
4917 device->fs_devices = fs_devices;
4918 device->missing = 1;
4919 fs_devices->num_devices++;
4920 fs_devices->missing_devices++;
4921 spin_lock_init(&device->io_lock);
4922 INIT_LIST_HEAD(&device->dev_alloc_list);
4923 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
4924 return device;
4925 }
4926
4927 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4928 struct extent_buffer *leaf,
4929 struct btrfs_chunk *chunk)
4930 {
4931 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4932 struct map_lookup *map;
4933 struct extent_map *em;
4934 u64 logical;
4935 u64 length;
4936 u64 devid;
4937 u8 uuid[BTRFS_UUID_SIZE];
4938 int num_stripes;
4939 int ret;
4940 int i;
4941
4942 logical = key->offset;
4943 length = btrfs_chunk_length(leaf, chunk);
4944
4945 read_lock(&map_tree->map_tree.lock);
4946 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
4947 read_unlock(&map_tree->map_tree.lock);
4948
4949 /* already mapped? */
4950 if (em && em->start <= logical && em->start + em->len > logical) {
4951 free_extent_map(em);
4952 return 0;
4953 } else if (em) {
4954 free_extent_map(em);
4955 }
4956
4957 em = alloc_extent_map();
4958 if (!em)
4959 return -ENOMEM;
4960 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4961 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4962 if (!map) {
4963 free_extent_map(em);
4964 return -ENOMEM;
4965 }
4966
4967 em->bdev = (struct block_device *)map;
4968 em->start = logical;
4969 em->len = length;
4970 em->block_start = 0;
4971 em->block_len = em->len;
4972
4973 map->num_stripes = num_stripes;
4974 map->io_width = btrfs_chunk_io_width(leaf, chunk);
4975 map->io_align = btrfs_chunk_io_align(leaf, chunk);
4976 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4977 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4978 map->type = btrfs_chunk_type(leaf, chunk);
4979 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4980 for (i = 0; i < num_stripes; i++) {
4981 map->stripes[i].physical =
4982 btrfs_stripe_offset_nr(leaf, chunk, i);
4983 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
4984 read_extent_buffer(leaf, uuid, (unsigned long)
4985 btrfs_stripe_dev_uuid_nr(chunk, i),
4986 BTRFS_UUID_SIZE);
4987 map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
4988 uuid, NULL);
4989 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
4990 kfree(map);
4991 free_extent_map(em);
4992 return -EIO;
4993 }
4994 if (!map->stripes[i].dev) {
4995 map->stripes[i].dev =
4996 add_missing_dev(root, devid, uuid);
4997 if (!map->stripes[i].dev) {
4998 kfree(map);
4999 free_extent_map(em);
5000 return -EIO;
5001 }
5002 }
5003 map->stripes[i].dev->in_fs_metadata = 1;
5004 }
5005
5006 write_lock(&map_tree->map_tree.lock);
5007 ret = add_extent_mapping(&map_tree->map_tree, em);
5008 write_unlock(&map_tree->map_tree.lock);
5009 BUG_ON(ret); /* Tree corruption */
5010 free_extent_map(em);
5011
5012 return 0;
5013 }
5014
5015 static void fill_device_from_item(struct extent_buffer *leaf,
5016 struct btrfs_dev_item *dev_item,
5017 struct btrfs_device *device)
5018 {
5019 unsigned long ptr;
5020
5021 device->devid = btrfs_device_id(leaf, dev_item);
5022 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
5023 device->total_bytes = device->disk_total_bytes;
5024 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
5025 device->type = btrfs_device_type(leaf, dev_item);
5026 device->io_align = btrfs_device_io_align(leaf, dev_item);
5027 device->io_width = btrfs_device_io_width(leaf, dev_item);
5028 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
5029 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
5030 device->is_tgtdev_for_dev_replace = 0;
5031
5032 ptr = (unsigned long)btrfs_device_uuid(dev_item);
5033 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
5034 }
5035
5036 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
5037 {
5038 struct btrfs_fs_devices *fs_devices;
5039 int ret;
5040
5041 BUG_ON(!mutex_is_locked(&uuid_mutex));
5042
5043 fs_devices = root->fs_info->fs_devices->seed;
5044 while (fs_devices) {
5045 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5046 ret = 0;
5047 goto out;
5048 }
5049 fs_devices = fs_devices->seed;
5050 }
5051
5052 fs_devices = find_fsid(fsid);
5053 if (!fs_devices) {
5054 ret = -ENOENT;
5055 goto out;
5056 }
5057
5058 fs_devices = clone_fs_devices(fs_devices);
5059 if (IS_ERR(fs_devices)) {
5060 ret = PTR_ERR(fs_devices);
5061 goto out;
5062 }
5063
5064 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
5065 root->fs_info->bdev_holder);
5066 if (ret) {
5067 free_fs_devices(fs_devices);
5068 goto out;
5069 }
5070
5071 if (!fs_devices->seeding) {
5072 __btrfs_close_devices(fs_devices);
5073 free_fs_devices(fs_devices);
5074 ret = -EINVAL;
5075 goto out;
5076 }
5077
5078 fs_devices->seed = root->fs_info->fs_devices->seed;
5079 root->fs_info->fs_devices->seed = fs_devices;
5080 out:
5081 return ret;
5082 }
5083
5084 static int read_one_dev(struct btrfs_root *root,
5085 struct extent_buffer *leaf,
5086 struct btrfs_dev_item *dev_item)
5087 {
5088 struct btrfs_device *device;
5089 u64 devid;
5090 int ret;
5091 u8 fs_uuid[BTRFS_UUID_SIZE];
5092 u8 dev_uuid[BTRFS_UUID_SIZE];
5093
5094 devid = btrfs_device_id(leaf, dev_item);
5095 read_extent_buffer(leaf, dev_uuid,
5096 (unsigned long)btrfs_device_uuid(dev_item),
5097 BTRFS_UUID_SIZE);
5098 read_extent_buffer(leaf, fs_uuid,
5099 (unsigned long)btrfs_device_fsid(dev_item),
5100 BTRFS_UUID_SIZE);
5101
5102 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
5103 ret = open_seed_devices(root, fs_uuid);
5104 if (ret && !btrfs_test_opt(root, DEGRADED))
5105 return ret;
5106 }
5107
5108 device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
5109 if (!device || !device->bdev) {
5110 if (!btrfs_test_opt(root, DEGRADED))
5111 return -EIO;
5112
5113 if (!device) {
5114 printk(KERN_WARNING "warning devid %llu missing\n",
5115 (unsigned long long)devid);
5116 device = add_missing_dev(root, devid, dev_uuid);
5117 if (!device)
5118 return -ENOMEM;
5119 } else if (!device->missing) {
5120 /*
5121 * this happens when a device that was properly setup
5122 * in the device info lists suddenly goes bad.
5123 * device->bdev is NULL, and so we have to set
5124 * device->missing to one here
5125 */
5126 root->fs_info->fs_devices->missing_devices++;
5127 device->missing = 1;
5128 }
5129 }
5130
5131 if (device->fs_devices != root->fs_info->fs_devices) {
5132 BUG_ON(device->writeable);
5133 if (device->generation !=
5134 btrfs_device_generation(leaf, dev_item))
5135 return -EINVAL;
5136 }
5137
5138 fill_device_from_item(leaf, dev_item, device);
5139 device->dev_root = root->fs_info->dev_root;
5140 device->in_fs_metadata = 1;
5141 if (device->writeable && !device->is_tgtdev_for_dev_replace) {
5142 device->fs_devices->total_rw_bytes += device->total_bytes;
5143 spin_lock(&root->fs_info->free_chunk_lock);
5144 root->fs_info->free_chunk_space += device->total_bytes -
5145 device->bytes_used;
5146 spin_unlock(&root->fs_info->free_chunk_lock);
5147 }
5148 ret = 0;
5149 return ret;
5150 }
5151
5152 int btrfs_read_sys_array(struct btrfs_root *root)
5153 {
5154 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
5155 struct extent_buffer *sb;
5156 struct btrfs_disk_key *disk_key;
5157 struct btrfs_chunk *chunk;
5158 u8 *ptr;
5159 unsigned long sb_ptr;
5160 int ret = 0;
5161 u32 num_stripes;
5162 u32 array_size;
5163 u32 len = 0;
5164 u32 cur;
5165 struct btrfs_key key;
5166
5167 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
5168 BTRFS_SUPER_INFO_SIZE);
5169 if (!sb)
5170 return -ENOMEM;
5171 btrfs_set_buffer_uptodate(sb);
5172 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
5173 /*
5174 * The sb extent buffer is artificial and just used to read the system array.
5175 * The btrfs_set_buffer_uptodate() call does not properly mark all its
5176 * pages up-to-date when the page is larger: extent does not cover the
5177 * whole page and consequently check_page_uptodate does not find all
5178 * the page's extents up-to-date (the hole beyond sb),
5179 * write_extent_buffer then triggers a WARN_ON.
5180 *
5181 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
5182 * but sb spans only this function. Add an explicit SetPageUptodate call
5183 * to silence the warning eg. on PowerPC 64.
5184 */
5185 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
5186 SetPageUptodate(sb->pages[0]);
5187
5188 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
5189 array_size = btrfs_super_sys_array_size(super_copy);
5190
5191 ptr = super_copy->sys_chunk_array;
5192 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
5193 cur = 0;
5194
5195 while (cur < array_size) {
5196 disk_key = (struct btrfs_disk_key *)ptr;
5197 btrfs_disk_key_to_cpu(&key, disk_key);
5198
5199 len = sizeof(*disk_key); ptr += len;
5200 sb_ptr += len;
5201 cur += len;
5202
5203 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
5204 chunk = (struct btrfs_chunk *)sb_ptr;
5205 ret = read_one_chunk(root, &key, sb, chunk);
5206 if (ret)
5207 break;
5208 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
5209 len = btrfs_chunk_item_size(num_stripes);
5210 } else {
5211 ret = -EIO;
5212 break;
5213 }
5214 ptr += len;
5215 sb_ptr += len;
5216 cur += len;
5217 }
5218 free_extent_buffer(sb);
5219 return ret;
5220 }
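
/*
 * Shape of the sys_chunk_array walk in btrfs_read_sys_array() above,
 * reproduced over a plain byte buffer: each record is a fixed-size key
 * followed by a chunk whose size depends on its stripe count.  The struct
 * layouts and sizes here are simplified stand-ins, not the on-disk btrfs
 * format.
 */
#include <stdint.h>
#include <string.h>

struct demo_key   { uint8_t type; uint64_t offset; };
struct demo_chunk { uint16_t num_stripes; /* per-stripe data follows */ };

#define DEMO_STRIPE_SIZE 32u

static int demo_walk_sys_array(const uint8_t *buf, size_t size)
{
	size_t cur = 0;

	while (cur + sizeof(struct demo_key) <= size) {
		struct demo_key key;
		struct demo_chunk chunk;

		memcpy(&key, buf + cur, sizeof(key));
		cur += sizeof(key);

		if (cur + sizeof(chunk) > size)
			return -1;	/* truncated record */
		memcpy(&chunk, buf + cur, sizeof(chunk));

		/* a chunk item's size depends on how many stripes it embeds */
		cur += sizeof(chunk) + chunk.num_stripes * DEMO_STRIPE_SIZE;
	}
	return 0;
}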
5221
5222 int btrfs_read_chunk_tree(struct btrfs_root *root)
5223 {
5224 struct btrfs_path *path;
5225 struct extent_buffer *leaf;
5226 struct btrfs_key key;
5227 struct btrfs_key found_key;
5228 int ret;
5229 int slot;
5230
5231 root = root->fs_info->chunk_root;
5232
5233 path = btrfs_alloc_path();
5234 if (!path)
5235 return -ENOMEM;
5236
5237 mutex_lock(&uuid_mutex);
5238 lock_chunks(root);
5239
5240 /* first we search for all of the device items, and then we
5241 * read in all of the chunk items. This way we can create chunk
5242 * mappings that reference all of the devices that are around
5243 */
5244 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
5245 key.offset = 0;
5246 key.type = 0;
5247 again:
5248 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5249 if (ret < 0)
5250 goto error;
5251 while (1) {
5252 leaf = path->nodes[0];
5253 slot = path->slots[0];
5254 if (slot >= btrfs_header_nritems(leaf)) {
5255 ret = btrfs_next_leaf(root, path);
5256 if (ret == 0)
5257 continue;
5258 if (ret < 0)
5259 goto error;
5260 break;
5261 }
5262 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5263 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
5264 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
5265 break;
5266 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
5267 struct btrfs_dev_item *dev_item;
5268 dev_item = btrfs_item_ptr(leaf, slot,
5269 struct btrfs_dev_item);
5270 ret = read_one_dev(root, leaf, dev_item);
5271 if (ret)
5272 goto error;
5273 }
5274 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
5275 struct btrfs_chunk *chunk;
5276 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
5277 ret = read_one_chunk(root, &found_key, leaf, chunk);
5278 if (ret)
5279 goto error;
5280 }
5281 path->slots[0]++;
5282 }
5283 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
5284 key.objectid = 0;
5285 btrfs_release_path(path);
5286 goto again;
5287 }
5288 ret = 0;
5289 error:
5290 unlock_chunks(root);
5291 mutex_unlock(&uuid_mutex);
5292
5293 btrfs_free_path(path);
5294 return ret;
5295 }
5296
5297 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
5298 {
5299 int i;
5300
5301 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5302 btrfs_dev_stat_reset(dev, i);
5303 }
5304
5305 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
5306 {
5307 struct btrfs_key key;
5308 struct btrfs_key found_key;
5309 struct btrfs_root *dev_root = fs_info->dev_root;
5310 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
5311 struct extent_buffer *eb;
5312 int slot;
5313 int ret = 0;
5314 struct btrfs_device *device;
5315 struct btrfs_path *path = NULL;
5316 int i;
5317
5318 path = btrfs_alloc_path();
5319 if (!path) {
5320 ret = -ENOMEM;
5321 goto out;
5322 }
5323
5324 mutex_lock(&fs_devices->device_list_mutex);
5325 list_for_each_entry(device, &fs_devices->devices, dev_list) {
5326 int item_size;
5327 struct btrfs_dev_stats_item *ptr;
5328
5329 key.objectid = 0;
5330 key.type = BTRFS_DEV_STATS_KEY;
5331 key.offset = device->devid;
5332 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
5333 if (ret) {
5334 __btrfs_reset_dev_stats(device);
5335 device->dev_stats_valid = 1;
5336 btrfs_release_path(path);
5337 continue;
5338 }
5339 slot = path->slots[0];
5340 eb = path->nodes[0];
5341 btrfs_item_key_to_cpu(eb, &found_key, slot);
5342 item_size = btrfs_item_size_nr(eb, slot);
5343
5344 ptr = btrfs_item_ptr(eb, slot,
5345 struct btrfs_dev_stats_item);
5346
5347 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
5348 if (item_size >= (1 + i) * sizeof(__le64))
5349 btrfs_dev_stat_set(device, i,
5350 btrfs_dev_stats_value(eb, ptr, i));
5351 else
5352 btrfs_dev_stat_reset(device, i);
5353 }
5354
5355 device->dev_stats_valid = 1;
5356 btrfs_dev_stat_print_on_load(device);
5357 btrfs_release_path(path);
5358 }
5359 mutex_unlock(&fs_devices->device_list_mutex);
5360
5361 out:
5362 btrfs_free_path(path);
5363 return ret < 0 ? ret : 0;
5364 }
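
/*
 * Sketch of the size-tolerant read in btrfs_init_dev_stats() above: take
 * as many 64-bit counters as the on-disk item actually holds and zero the
 * rest, so a shorter item written by an older kernel still loads cleanly.
 * The constant and names are invented for the example.
 */
#include <stddef.h>
#include <stdint.h>

#define DEMO_STAT_VALUES_MAX 5

static void demo_load_dev_stats(uint64_t *stats, const uint64_t *item,
				size_t item_size)
{
	int i;

	for (i = 0; i < DEMO_STAT_VALUES_MAX; i++) {
		if (item_size >= (size_t)(i + 1) * sizeof(uint64_t))
			stats[i] = item[i];
		else
			stats[i] = 0;
	}
}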
5365
5366 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
5367 struct btrfs_root *dev_root,
5368 struct btrfs_device *device)
5369 {
5370 struct btrfs_path *path;
5371 struct btrfs_key key;
5372 struct extent_buffer *eb;
5373 struct btrfs_dev_stats_item *ptr;
5374 int ret;
5375 int i;
5376
5377 key.objectid = 0;
5378 key.type = BTRFS_DEV_STATS_KEY;
5379 key.offset = device->devid;
5380
5381 path = btrfs_alloc_path();
5382 BUG_ON(!path);
5383 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
5384 if (ret < 0) {
5385 printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
5386 ret, rcu_str_deref(device->name));
5387 goto out;
5388 }
5389
5390 if (ret == 0 &&
5391 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
5392 /* need to delete old one and insert a new one */
5393 ret = btrfs_del_item(trans, dev_root, path);
5394 if (ret != 0) {
5395 printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
5396 rcu_str_deref(device->name), ret);
5397 goto out;
5398 }
5399 ret = 1;
5400 }
5401
5402 if (ret == 1) {
5403 /* need to insert a new item */
5404 btrfs_release_path(path);
5405 ret = btrfs_insert_empty_item(trans, dev_root, path,
5406 &key, sizeof(*ptr));
5407 if (ret < 0) {
5408 printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
5409 rcu_str_deref(device->name), ret);
5410 goto out;
5411 }
5412 }
5413
5414 eb = path->nodes[0];
5415 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
5416 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5417 btrfs_set_dev_stats_value(eb, ptr, i,
5418 btrfs_dev_stat_read(device, i));
5419 btrfs_mark_buffer_dirty(eb);
5420
5421 out:
5422 btrfs_free_path(path);
5423 return ret;
5424 }
5425
5426 /*
5427 * called from commit_transaction. Writes all changed device stats to disk.
5428 */
5429 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
5430 struct btrfs_fs_info *fs_info)
5431 {
5432 struct btrfs_root *dev_root = fs_info->dev_root;
5433 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
5434 struct btrfs_device *device;
5435 int ret = 0;
5436
5437 mutex_lock(&fs_devices->device_list_mutex);
5438 list_for_each_entry(device, &fs_devices->devices, dev_list) {
5439 if (!device->dev_stats_valid || !device->dev_stats_dirty)
5440 continue;
5441
5442 ret = update_dev_stat_item(trans, dev_root, device);
5443 if (!ret)
5444 device->dev_stats_dirty = 0;
5445 }
5446 mutex_unlock(&fs_devices->device_list_mutex);
5447
5448 return ret;
5449 }
5450
5451 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
5452 {
5453 btrfs_dev_stat_inc(dev, index);
5454 btrfs_dev_stat_print_on_error(dev);
5455 }
5456
5457 void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
5458 {
5459 if (!dev->dev_stats_valid)
5460 return;
5461 printk_ratelimited_in_rcu(KERN_ERR
5462 "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
5463 rcu_str_deref(dev->name),
5464 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
5465 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
5466 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
5467 btrfs_dev_stat_read(dev,
5468 BTRFS_DEV_STAT_CORRUPTION_ERRS),
5469 btrfs_dev_stat_read(dev,
5470 BTRFS_DEV_STAT_GENERATION_ERRS));
5471 }
5472
5473 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
5474 {
5475 int i;
5476
5477 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5478 if (btrfs_dev_stat_read(dev, i) != 0)
5479 break;
5480 if (i == BTRFS_DEV_STAT_VALUES_MAX)
5481 return; /* all values == 0, suppress message */
5482
5483 printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
5484 rcu_str_deref(dev->name),
5485 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
5486 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
5487 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
5488 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
5489 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
5490 }
5491
5492 int btrfs_get_dev_stats(struct btrfs_root *root,
5493 struct btrfs_ioctl_get_dev_stats *stats)
5494 {
5495 struct btrfs_device *dev;
5496 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
5497 int i;
5498
5499 mutex_lock(&fs_devices->device_list_mutex);
5500 dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
5501 mutex_unlock(&fs_devices->device_list_mutex);
5502
5503 if (!dev) {
5504 printk(KERN_WARNING
5505 "btrfs: get dev_stats failed, device not found\n");
5506 return -ENODEV;
5507 } else if (!dev->dev_stats_valid) {
5508 printk(KERN_WARNING
5509 "btrfs: get dev_stats failed, not yet valid\n");
5510 return -ENODEV;
5511 } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
5512 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
5513 if (stats->nr_items > i)
5514 stats->values[i] =
5515 btrfs_dev_stat_read_and_reset(dev, i);
5516 else
5517 btrfs_dev_stat_reset(dev, i);
5518 }
5519 } else {
5520 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5521 if (stats->nr_items > i)
5522 stats->values[i] = btrfs_dev_stat_read(dev, i);
5523 }
5524 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
5525 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
5526 return 0;
5527 }
5528
5529 int btrfs_scratch_superblock(struct btrfs_device *device)
5530 {
5531 struct buffer_head *bh;
5532 struct btrfs_super_block *disk_super;
5533
5534 bh = btrfs_read_dev_super(device->bdev);
5535 if (!bh)
5536 return -EINVAL;
5537 disk_super = (struct btrfs_super_block *)bh->b_data;
5538
5539 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
5540 set_buffer_dirty(bh);
5541 sync_dirty_buffer(bh);
5542 brelse(bh);
5543
5544 return 0;
5545 }