Btrfs: disallow mutually exclusive admin operations from user mode
[deliverable/linux.git] / fs / btrfs / volumes.c
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17 */
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/slab.h>
21 #include <linux/buffer_head.h>
22 #include <linux/blkdev.h>
23 #include <linux/random.h>
24 #include <linux/iocontext.h>
25 #include <linux/capability.h>
26 #include <linux/ratelimit.h>
27 #include <linux/kthread.h>
28 #include "compat.h"
29 #include "ctree.h"
30 #include "extent_map.h"
31 #include "disk-io.h"
32 #include "transaction.h"
33 #include "print-tree.h"
34 #include "volumes.h"
35 #include "async-thread.h"
36 #include "check-integrity.h"
37 #include "rcu-string.h"
38 #include "math.h"
39
40 static int init_first_rw_device(struct btrfs_trans_handle *trans,
41 struct btrfs_root *root,
42 struct btrfs_device *device);
43 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
44 static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
45 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
46
47 static DEFINE_MUTEX(uuid_mutex);
48 static LIST_HEAD(fs_uuids);
49
50 static void lock_chunks(struct btrfs_root *root)
51 {
52 mutex_lock(&root->fs_info->chunk_mutex);
53 }
54
55 static void unlock_chunks(struct btrfs_root *root)
56 {
57 mutex_unlock(&root->fs_info->chunk_mutex);
58 }
59
60 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
61 {
62 struct btrfs_device *device;
63 WARN_ON(fs_devices->opened);
64 while (!list_empty(&fs_devices->devices)) {
65 device = list_entry(fs_devices->devices.next,
66 struct btrfs_device, dev_list);
67 list_del(&device->dev_list);
68 rcu_string_free(device->name);
69 kfree(device);
70 }
71 kfree(fs_devices);
72 }
73
74 void btrfs_cleanup_fs_uuids(void)
75 {
76 struct btrfs_fs_devices *fs_devices;
77
78 while (!list_empty(&fs_uuids)) {
79 fs_devices = list_entry(fs_uuids.next,
80 struct btrfs_fs_devices, list);
81 list_del(&fs_devices->list);
82 free_fs_devices(fs_devices);
83 }
84 }
85
86 static noinline struct btrfs_device *__find_device(struct list_head *head,
87 u64 devid, u8 *uuid)
88 {
89 struct btrfs_device *dev;
90
91 list_for_each_entry(dev, head, dev_list) {
92 if (dev->devid == devid &&
93 (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
94 return dev;
95 }
96 }
97 return NULL;
98 }
99
100 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
101 {
102 struct btrfs_fs_devices *fs_devices;
103
104 list_for_each_entry(fs_devices, &fs_uuids, list) {
105 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
106 return fs_devices;
107 }
108 return NULL;
109 }
110
111 static int
112 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
113 int flush, struct block_device **bdev,
114 struct buffer_head **bh)
115 {
116 int ret;
117
118 *bdev = blkdev_get_by_path(device_path, flags, holder);
119
120 if (IS_ERR(*bdev)) {
121 ret = PTR_ERR(*bdev);
122 printk(KERN_INFO "btrfs: open %s failed\n", device_path);
123 goto error;
124 }
125
126 if (flush)
127 filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
128 ret = set_blocksize(*bdev, 4096);
129 if (ret) {
130 blkdev_put(*bdev, flags);
131 goto error;
132 }
133 invalidate_bdev(*bdev);
134 *bh = btrfs_read_dev_super(*bdev);
135 if (!*bh) {
136 ret = -EINVAL;
137 blkdev_put(*bdev, flags);
138 goto error;
139 }
140
141 return 0;
142
143 error:
144 *bdev = NULL;
145 *bh = NULL;
146 return ret;
147 }
148
149 static void requeue_list(struct btrfs_pending_bios *pending_bios,
150 struct bio *head, struct bio *tail)
151 {
152
153 struct bio *old_head;
154
155 old_head = pending_bios->head;
156 pending_bios->head = head;
157 if (pending_bios->tail)
158 tail->bi_next = old_head;
159 else
160 pending_bios->tail = tail;
161 }
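
/*
 * Illustrative standalone sketch (not from the kernel source): requeue_list()
 * above splices a previously detached head..tail segment back onto the
 * *front* of a pending list.  The model below shows the same head/tail
 * bookkeeping with a simplified node type; all names here are hypothetical.
 */
#include <assert.h>
#include <stddef.h>

struct node { struct node *next; };
struct pending { struct node *head; struct node *tail; };

static void requeue(struct pending *p, struct node *head, struct node *tail)
{
	struct node *old_head = p->head;

	p->head = head;
	if (p->tail)
		tail->next = old_head;	/* old list now follows the segment */
	else
		p->tail = tail;		/* list was empty: segment is the list */
}

int main(void)
{
	struct node a = { NULL }, b = { &a };
	struct pending p = { NULL, NULL };

	requeue(&p, &b, &a);		/* empty list: b->a becomes the list */
	assert(p.head == &b && p.tail == &a);

	{
		struct node c = { NULL };

		requeue(&p, &c, &c);	/* non-empty: c goes in front of b->a */
		assert(p.head == &c && c.next == &b && p.tail == &a);
	}
	return 0;
}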
162
163 /*
164 * we try to collect pending bios for a device so we don't get a large
165 * number of procs sending bios down to the same device. This greatly
166  * improves the scheduler's ability to collect and merge the bios.
167 *
168 * But, it also turns into a long list of bios to process and that is sure
169 * to eventually make the worker thread block. The solution here is to
170 * make some progress and then put this work struct back at the end of
171 * the list if the block device is congested. This way, multiple devices
172 * can make progress from a single worker thread.
173 */
174 static noinline void run_scheduled_bios(struct btrfs_device *device)
175 {
176 struct bio *pending;
177 struct backing_dev_info *bdi;
178 struct btrfs_fs_info *fs_info;
179 struct btrfs_pending_bios *pending_bios;
180 struct bio *tail;
181 struct bio *cur;
182 int again = 0;
183 unsigned long num_run;
184 unsigned long batch_run = 0;
185 unsigned long limit;
186 unsigned long last_waited = 0;
187 int force_reg = 0;
188 int sync_pending = 0;
189 struct blk_plug plug;
190
191 /*
192 * this function runs all the bios we've collected for
193 * a particular device. We don't want to wander off to
194 * another device without first sending all of these down.
195 * So, setup a plug here and finish it off before we return
196 */
197 blk_start_plug(&plug);
198
199 bdi = blk_get_backing_dev_info(device->bdev);
200 fs_info = device->dev_root->fs_info;
201 limit = btrfs_async_submit_limit(fs_info);
202 limit = limit * 2 / 3;
203
204 loop:
205 spin_lock(&device->io_lock);
206
207 loop_lock:
208 num_run = 0;
209
210 /* take all the bios off the list at once and process them
211 * later on (without the lock held). But, remember the
212 * tail and other pointers so the bios can be properly reinserted
213 * into the list if we hit congestion
214 */
215 if (!force_reg && device->pending_sync_bios.head) {
216 pending_bios = &device->pending_sync_bios;
217 force_reg = 1;
218 } else {
219 pending_bios = &device->pending_bios;
220 force_reg = 0;
221 }
222
223 pending = pending_bios->head;
224 tail = pending_bios->tail;
225 WARN_ON(pending && !tail);
226
227 /*
228 * if pending was null this time around, no bios need processing
229 * at all and we can stop. Otherwise it'll loop back up again
230 * and do an additional check so no bios are missed.
231 *
232 * device->running_pending is used to synchronize with the
233 * schedule_bio code.
234 */
235 if (device->pending_sync_bios.head == NULL &&
236 device->pending_bios.head == NULL) {
237 again = 0;
238 device->running_pending = 0;
239 } else {
240 again = 1;
241 device->running_pending = 1;
242 }
243
244 pending_bios->head = NULL;
245 pending_bios->tail = NULL;
246
247 spin_unlock(&device->io_lock);
248
249 while (pending) {
250
251 rmb();
252 /* we want to work on both lists, but do more bios on the
253 * sync list than the regular list
254 */
255 if ((num_run > 32 &&
256 pending_bios != &device->pending_sync_bios &&
257 device->pending_sync_bios.head) ||
258 (num_run > 64 && pending_bios == &device->pending_sync_bios &&
259 device->pending_bios.head)) {
260 spin_lock(&device->io_lock);
261 requeue_list(pending_bios, pending, tail);
262 goto loop_lock;
263 }
264
265 cur = pending;
266 pending = pending->bi_next;
267 cur->bi_next = NULL;
268
269 if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
270 waitqueue_active(&fs_info->async_submit_wait))
271 wake_up(&fs_info->async_submit_wait);
272
273 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
274
275 /*
276 * if we're doing the sync list, record that our
277 * plug has some sync requests on it
278 *
279 * If we're doing the regular list and there are
280 * sync requests sitting around, unplug before
281 * we add more
282 */
283 if (pending_bios == &device->pending_sync_bios) {
284 sync_pending = 1;
285 } else if (sync_pending) {
286 blk_finish_plug(&plug);
287 blk_start_plug(&plug);
288 sync_pending = 0;
289 }
290
291 btrfsic_submit_bio(cur->bi_rw, cur);
292 num_run++;
293 batch_run++;
294 if (need_resched())
295 cond_resched();
296
297 /*
298 * we made progress, there is more work to do and the bdi
299 * is now congested. Back off and let other work structs
300 * run instead
301 */
302 if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
303 fs_info->fs_devices->open_devices > 1) {
304 struct io_context *ioc;
305
306 ioc = current->io_context;
307
308 /*
309 * the main goal here is that we don't want to
310 * block if we're going to be able to submit
311 * more requests without blocking.
312 *
313 * This code does two great things, it pokes into
314 * the elevator code from a filesystem _and_
315 * it makes assumptions about how batching works.
316 */
317 if (ioc && ioc->nr_batch_requests > 0 &&
318 time_before(jiffies, ioc->last_waited + HZ/50UL) &&
319 (last_waited == 0 ||
320 ioc->last_waited == last_waited)) {
321 /*
322 * we want to go through our batch of
323 * requests and stop. So, we copy out
324 * the ioc->last_waited time and test
325 * against it before looping
326 */
327 last_waited = ioc->last_waited;
328 if (need_resched())
329 cond_resched();
330 continue;
331 }
332 spin_lock(&device->io_lock);
333 requeue_list(pending_bios, pending, tail);
334 device->running_pending = 1;
335
336 spin_unlock(&device->io_lock);
337 btrfs_requeue_work(&device->work);
338 goto done;
339 }
340 /* unplug every 64 requests just for good measure */
341 if (batch_run % 64 == 0) {
342 blk_finish_plug(&plug);
343 blk_start_plug(&plug);
344 sync_pending = 0;
345 }
346 }
347
348 cond_resched();
349 if (again)
350 goto loop;
351
352 spin_lock(&device->io_lock);
353 if (device->pending_bios.head || device->pending_sync_bios.head)
354 goto loop_lock;
355 spin_unlock(&device->io_lock);
356
357 done:
358 blk_finish_plug(&plug);
359 }
360
361 static void pending_bios_fn(struct btrfs_work *work)
362 {
363 struct btrfs_device *device;
364
365 device = container_of(work, struct btrfs_device, work);
366 run_scheduled_bios(device);
367 }
368
369 static noinline int device_list_add(const char *path,
370 struct btrfs_super_block *disk_super,
371 u64 devid, struct btrfs_fs_devices **fs_devices_ret)
372 {
373 struct btrfs_device *device;
374 struct btrfs_fs_devices *fs_devices;
375 struct rcu_string *name;
376 u64 found_transid = btrfs_super_generation(disk_super);
377
378 fs_devices = find_fsid(disk_super->fsid);
379 if (!fs_devices) {
380 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
381 if (!fs_devices)
382 return -ENOMEM;
383 INIT_LIST_HEAD(&fs_devices->devices);
384 INIT_LIST_HEAD(&fs_devices->alloc_list);
385 list_add(&fs_devices->list, &fs_uuids);
386 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
387 fs_devices->latest_devid = devid;
388 fs_devices->latest_trans = found_transid;
389 mutex_init(&fs_devices->device_list_mutex);
390 device = NULL;
391 } else {
392 device = __find_device(&fs_devices->devices, devid,
393 disk_super->dev_item.uuid);
394 }
395 if (!device) {
396 if (fs_devices->opened)
397 return -EBUSY;
398
399 device = kzalloc(sizeof(*device), GFP_NOFS);
400 if (!device) {
401 /* we can safely leave the fs_devices entry around */
402 return -ENOMEM;
403 }
404 device->devid = devid;
405 device->dev_stats_valid = 0;
406 device->work.func = pending_bios_fn;
407 memcpy(device->uuid, disk_super->dev_item.uuid,
408 BTRFS_UUID_SIZE);
409 spin_lock_init(&device->io_lock);
410
411 name = rcu_string_strdup(path, GFP_NOFS);
412 if (!name) {
413 kfree(device);
414 return -ENOMEM;
415 }
416 rcu_assign_pointer(device->name, name);
417 INIT_LIST_HEAD(&device->dev_alloc_list);
418
419 /* init readahead state */
420 spin_lock_init(&device->reada_lock);
421 device->reada_curr_zone = NULL;
422 atomic_set(&device->reada_in_flight, 0);
423 device->reada_next = 0;
424 INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
425 INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);
426
427 mutex_lock(&fs_devices->device_list_mutex);
428 list_add_rcu(&device->dev_list, &fs_devices->devices);
429 mutex_unlock(&fs_devices->device_list_mutex);
430
431 device->fs_devices = fs_devices;
432 fs_devices->num_devices++;
433 } else if (!device->name || strcmp(device->name->str, path)) {
434 name = rcu_string_strdup(path, GFP_NOFS);
435 if (!name)
436 return -ENOMEM;
437 rcu_string_free(device->name);
438 rcu_assign_pointer(device->name, name);
439 if (device->missing) {
440 fs_devices->missing_devices--;
441 device->missing = 0;
442 }
443 }
444
445 if (found_transid > fs_devices->latest_trans) {
446 fs_devices->latest_devid = devid;
447 fs_devices->latest_trans = found_transid;
448 }
449 *fs_devices_ret = fs_devices;
450 return 0;
451 }
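
/*
 * Standalone sketch of the two-level keying that device_list_add() relies
 * on: scanned devices are grouped by filesystem UUID (fsid), and within a
 * group a device is identified by its (devid, device uuid) pair.  Arrays
 * stand in for the kernel lists, all names are illustrative, and bounds
 * checking is omitted for brevity.
 */
#include <stdint.h>
#include <string.h>

#define MODEL_UUID_SIZE 16

struct model_device {
	uint64_t devid;
	uint8_t uuid[MODEL_UUID_SIZE];
};

struct model_fs {
	uint8_t fsid[MODEL_UUID_SIZE];
	struct model_device devs[16];
	int ndevs;
};

static struct model_fs fss[8];
static int nfss;

static struct model_fs *model_find_fsid(const uint8_t *fsid)
{
	for (int i = 0; i < nfss; i++)
		if (!memcmp(fss[i].fsid, fsid, MODEL_UUID_SIZE))
			return &fss[i];
	return NULL;
}

/* returns 1 if the device is new, 0 if this was a rescan of a known one */
static int model_device_add(const uint8_t *fsid, uint64_t devid,
			    const uint8_t *uuid)
{
	struct model_fs *fs = model_find_fsid(fsid);

	if (!fs) {
		/* first device seen for this filesystem */
		fs = &fss[nfss++];
		memcpy(fs->fsid, fsid, MODEL_UUID_SIZE);
	}
	for (int i = 0; i < fs->ndevs; i++)
		if (fs->devs[i].devid == devid &&
		    !memcmp(fs->devs[i].uuid, uuid, MODEL_UUID_SIZE))
			return 0;
	fs->devs[fs->ndevs].devid = devid;
	memcpy(fs->devs[fs->ndevs].uuid, uuid, MODEL_UUID_SIZE);
	fs->ndevs++;
	return 1;
}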
452
453 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
454 {
455 struct btrfs_fs_devices *fs_devices;
456 struct btrfs_device *device;
457 struct btrfs_device *orig_dev;
458
459 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
460 if (!fs_devices)
461 return ERR_PTR(-ENOMEM);
462
463 INIT_LIST_HEAD(&fs_devices->devices);
464 INIT_LIST_HEAD(&fs_devices->alloc_list);
465 INIT_LIST_HEAD(&fs_devices->list);
466 mutex_init(&fs_devices->device_list_mutex);
467 fs_devices->latest_devid = orig->latest_devid;
468 fs_devices->latest_trans = orig->latest_trans;
469 fs_devices->total_devices = orig->total_devices;
470 memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
471
472         /* We hold the volume lock, so it is safe to get the devices. */
473 list_for_each_entry(orig_dev, &orig->devices, dev_list) {
474 struct rcu_string *name;
475
476 device = kzalloc(sizeof(*device), GFP_NOFS);
477 if (!device)
478 goto error;
479
480 /*
481 * This is ok to do without rcu read locked because we hold the
482 * uuid mutex so nothing we touch in here is going to disappear.
483 */
484 name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
485 if (!name) {
486 kfree(device);
487 goto error;
488 }
489 rcu_assign_pointer(device->name, name);
490
491 device->devid = orig_dev->devid;
492 device->work.func = pending_bios_fn;
493 memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
494 spin_lock_init(&device->io_lock);
495 INIT_LIST_HEAD(&device->dev_list);
496 INIT_LIST_HEAD(&device->dev_alloc_list);
497
498 list_add(&device->dev_list, &fs_devices->devices);
499 device->fs_devices = fs_devices;
500 fs_devices->num_devices++;
501 }
502 return fs_devices;
503 error:
504 free_fs_devices(fs_devices);
505 return ERR_PTR(-ENOMEM);
506 }
507
508 void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
509 {
510 struct btrfs_device *device, *next;
511
512 struct block_device *latest_bdev = NULL;
513 u64 latest_devid = 0;
514 u64 latest_transid = 0;
515
516 mutex_lock(&uuid_mutex);
517 again:
518         /* This is the initialized path, so it is safe to release the devices. */
519 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
520 if (device->in_fs_metadata) {
521 if (!latest_transid ||
522 device->generation > latest_transid) {
523 latest_devid = device->devid;
524 latest_transid = device->generation;
525 latest_bdev = device->bdev;
526 }
527 continue;
528 }
529
530 if (device->bdev) {
531 blkdev_put(device->bdev, device->mode);
532 device->bdev = NULL;
533 fs_devices->open_devices--;
534 }
535 if (device->writeable) {
536 list_del_init(&device->dev_alloc_list);
537 device->writeable = 0;
538 fs_devices->rw_devices--;
539 }
540 list_del_init(&device->dev_list);
541 fs_devices->num_devices--;
542 rcu_string_free(device->name);
543 kfree(device);
544 }
545
546 if (fs_devices->seed) {
547 fs_devices = fs_devices->seed;
548 goto again;
549 }
550
551 fs_devices->latest_bdev = latest_bdev;
552 fs_devices->latest_devid = latest_devid;
553 fs_devices->latest_trans = latest_transid;
554
555 mutex_unlock(&uuid_mutex);
556 }
557
558 static void __free_device(struct work_struct *work)
559 {
560 struct btrfs_device *device;
561
562 device = container_of(work, struct btrfs_device, rcu_work);
563
564 if (device->bdev)
565 blkdev_put(device->bdev, device->mode);
566
567 rcu_string_free(device->name);
568 kfree(device);
569 }
570
571 static void free_device(struct rcu_head *head)
572 {
573 struct btrfs_device *device;
574
575 device = container_of(head, struct btrfs_device, rcu);
576
577 INIT_WORK(&device->rcu_work, __free_device);
578 schedule_work(&device->rcu_work);
579 }
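
/*
 * Why the free above is split into two stages: call_rcu() defers the
 * callback until all current RCU readers of the device list are done, but
 * RCU callbacks run in a context that must not sleep, while blkdev_put()
 * can.  free_device() therefore only hands the device off to a workqueue,
 * and the blocking cleanup happens later in __free_device().
 */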
580
581 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
582 {
583 struct btrfs_device *device;
584
585 if (--fs_devices->opened > 0)
586 return 0;
587
588 mutex_lock(&fs_devices->device_list_mutex);
589 list_for_each_entry(device, &fs_devices->devices, dev_list) {
590 struct btrfs_device *new_device;
591 struct rcu_string *name;
592
593 if (device->bdev)
594 fs_devices->open_devices--;
595
596 if (device->writeable) {
597 list_del_init(&device->dev_alloc_list);
598 fs_devices->rw_devices--;
599 }
600
601 if (device->can_discard)
602 fs_devices->num_can_discard--;
603
604 new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
605 BUG_ON(!new_device); /* -ENOMEM */
606 memcpy(new_device, device, sizeof(*new_device));
607
608 /* Safe because we are under uuid_mutex */
609 if (device->name) {
610 name = rcu_string_strdup(device->name->str, GFP_NOFS);
611 BUG_ON(device->name && !name); /* -ENOMEM */
612 rcu_assign_pointer(new_device->name, name);
613 }
614 new_device->bdev = NULL;
615 new_device->writeable = 0;
616 new_device->in_fs_metadata = 0;
617 new_device->can_discard = 0;
618 list_replace_rcu(&device->dev_list, &new_device->dev_list);
619
620 call_rcu(&device->rcu, free_device);
621 }
622 mutex_unlock(&fs_devices->device_list_mutex);
623
624 WARN_ON(fs_devices->open_devices);
625 WARN_ON(fs_devices->rw_devices);
626 fs_devices->opened = 0;
627 fs_devices->seeding = 0;
628
629 return 0;
630 }
631
632 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
633 {
634 struct btrfs_fs_devices *seed_devices = NULL;
635 int ret;
636
637 mutex_lock(&uuid_mutex);
638 ret = __btrfs_close_devices(fs_devices);
639 if (!fs_devices->opened) {
640 seed_devices = fs_devices->seed;
641 fs_devices->seed = NULL;
642 }
643 mutex_unlock(&uuid_mutex);
644
645 while (seed_devices) {
646 fs_devices = seed_devices;
647 seed_devices = fs_devices->seed;
648 __btrfs_close_devices(fs_devices);
649 free_fs_devices(fs_devices);
650 }
651 return ret;
652 }
653
654 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
655 fmode_t flags, void *holder)
656 {
657 struct request_queue *q;
658 struct block_device *bdev;
659 struct list_head *head = &fs_devices->devices;
660 struct btrfs_device *device;
661 struct block_device *latest_bdev = NULL;
662 struct buffer_head *bh;
663 struct btrfs_super_block *disk_super;
664 u64 latest_devid = 0;
665 u64 latest_transid = 0;
666 u64 devid;
667 int seeding = 1;
668 int ret = 0;
669
670 flags |= FMODE_EXCL;
671
672 list_for_each_entry(device, head, dev_list) {
673 if (device->bdev)
674 continue;
675 if (!device->name)
676 continue;
677
678 ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
679 &bdev, &bh);
680 if (ret)
681 continue;
682
683 disk_super = (struct btrfs_super_block *)bh->b_data;
684 devid = btrfs_stack_device_id(&disk_super->dev_item);
685 if (devid != device->devid)
686 goto error_brelse;
687
688 if (memcmp(device->uuid, disk_super->dev_item.uuid,
689 BTRFS_UUID_SIZE))
690 goto error_brelse;
691
692 device->generation = btrfs_super_generation(disk_super);
693 if (!latest_transid || device->generation > latest_transid) {
694 latest_devid = devid;
695 latest_transid = device->generation;
696 latest_bdev = bdev;
697 }
698
699 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
700 device->writeable = 0;
701 } else {
702 device->writeable = !bdev_read_only(bdev);
703 seeding = 0;
704 }
705
706 q = bdev_get_queue(bdev);
707 if (blk_queue_discard(q)) {
708 device->can_discard = 1;
709 fs_devices->num_can_discard++;
710 }
711
712 device->bdev = bdev;
713 device->in_fs_metadata = 0;
714 device->mode = flags;
715
716 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
717 fs_devices->rotating = 1;
718
719 fs_devices->open_devices++;
720 if (device->writeable) {
721 fs_devices->rw_devices++;
722 list_add(&device->dev_alloc_list,
723 &fs_devices->alloc_list);
724 }
725 brelse(bh);
726 continue;
727
728 error_brelse:
729 brelse(bh);
730 blkdev_put(bdev, flags);
731 continue;
732 }
733 if (fs_devices->open_devices == 0) {
734 ret = -EINVAL;
735 goto out;
736 }
737 fs_devices->seeding = seeding;
738 fs_devices->opened = 1;
739 fs_devices->latest_bdev = latest_bdev;
740 fs_devices->latest_devid = latest_devid;
741 fs_devices->latest_trans = latest_transid;
742 fs_devices->total_rw_bytes = 0;
743 out:
744 return ret;
745 }
746
747 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
748 fmode_t flags, void *holder)
749 {
750 int ret;
751
752 mutex_lock(&uuid_mutex);
753 if (fs_devices->opened) {
754 fs_devices->opened++;
755 ret = 0;
756 } else {
757 ret = __btrfs_open_devices(fs_devices, flags, holder);
758 }
759 mutex_unlock(&uuid_mutex);
760 return ret;
761 }
762
763 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
764 struct btrfs_fs_devices **fs_devices_ret)
765 {
766 struct btrfs_super_block *disk_super;
767 struct block_device *bdev;
768 struct buffer_head *bh;
769 int ret;
770 u64 devid;
771 u64 transid;
772 u64 total_devices;
773
774 flags |= FMODE_EXCL;
775 mutex_lock(&uuid_mutex);
776 ret = btrfs_get_bdev_and_sb(path, flags, holder, 0, &bdev, &bh);
777 if (ret)
778 goto error;
779 disk_super = (struct btrfs_super_block *)bh->b_data;
780 devid = btrfs_stack_device_id(&disk_super->dev_item);
781 transid = btrfs_super_generation(disk_super);
782 total_devices = btrfs_super_num_devices(disk_super);
783 if (disk_super->label[0]) {
784 if (disk_super->label[BTRFS_LABEL_SIZE - 1])
785 disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
786 printk(KERN_INFO "device label %s ", disk_super->label);
787 } else {
788 printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
789 }
790 printk(KERN_CONT "devid %llu transid %llu %s\n",
791 (unsigned long long)devid, (unsigned long long)transid, path);
792 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
793 if (!ret && fs_devices_ret)
794 (*fs_devices_ret)->total_devices = total_devices;
795 brelse(bh);
796 blkdev_put(bdev, flags);
797 error:
798 mutex_unlock(&uuid_mutex);
799 return ret;
800 }
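
/*
 * Userspace sketch of the probe btrfs_scan_one_device() performs: read the
 * primary superblock at its fixed 64KiB offset, check the magic, and print
 * the filesystem UUID.  Deliberately minimal (no backup supers, no checksum
 * verification); the offsets follow the on-disk superblock layout.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define SUPER_OFFSET	65536	/* BTRFS_SUPER_INFO_OFFSET */
#define MAGIC_OFFSET	64	/* after csum[32], fsid[16], bytenr, flags */
#define FSID_OFFSET	32

int main(int argc, char **argv)
{
	unsigned char sb[4096];
	int fd;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || pread(fd, sb, sizeof(sb), SUPER_OFFSET) != sizeof(sb))
		return 1;
	if (memcmp(sb + MAGIC_OFFSET, "_BHRfS_M", 8)) {
		fprintf(stderr, "%s: not a btrfs device\n", argv[1]);
		return 1;
	}
	printf("fsid ");
	for (int i = 0; i < 16; i++)
		printf("%02x%s", sb[FSID_OFFSET + i],
		       (i == 3 || i == 5 || i == 7 || i == 9) ? "-" : "");
	printf(" on %s\n", argv[1]);
	close(fd);
	return 0;
}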
801
802 /* helper to account the used device space in the range */
803 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
804 u64 end, u64 *length)
805 {
806 struct btrfs_key key;
807 struct btrfs_root *root = device->dev_root;
808 struct btrfs_dev_extent *dev_extent;
809 struct btrfs_path *path;
810 u64 extent_end;
811 int ret;
812 int slot;
813 struct extent_buffer *l;
814
815 *length = 0;
816
817 if (start >= device->total_bytes)
818 return 0;
819
820 path = btrfs_alloc_path();
821 if (!path)
822 return -ENOMEM;
823 path->reada = 2;
824
825 key.objectid = device->devid;
826 key.offset = start;
827 key.type = BTRFS_DEV_EXTENT_KEY;
828
829 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
830 if (ret < 0)
831 goto out;
832 if (ret > 0) {
833 ret = btrfs_previous_item(root, path, key.objectid, key.type);
834 if (ret < 0)
835 goto out;
836 }
837
838 while (1) {
839 l = path->nodes[0];
840 slot = path->slots[0];
841 if (slot >= btrfs_header_nritems(l)) {
842 ret = btrfs_next_leaf(root, path);
843 if (ret == 0)
844 continue;
845 if (ret < 0)
846 goto out;
847
848 break;
849 }
850 btrfs_item_key_to_cpu(l, &key, slot);
851
852 if (key.objectid < device->devid)
853 goto next;
854
855 if (key.objectid > device->devid)
856 break;
857
858 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
859 goto next;
860
861 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
862 extent_end = key.offset + btrfs_dev_extent_length(l,
863 dev_extent);
864 if (key.offset <= start && extent_end > end) {
865 *length = end - start + 1;
866 break;
867 } else if (key.offset <= start && extent_end > start)
868 *length += extent_end - start;
869 else if (key.offset > start && extent_end <= end)
870 *length += extent_end - key.offset;
871 else if (key.offset > start && key.offset <= end) {
872 *length += end - key.offset + 1;
873 break;
874 } else if (key.offset > end)
875 break;
876
877 next:
878 path->slots[0]++;
879 }
880 ret = 0;
881 out:
882 btrfs_free_path(path);
883 return ret;
884 }
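
/*
 * The four overlap cases in the loop above collapse into one clamped
 * interval computation.  Standalone check (the queried range [start, end]
 * is inclusive, a dev extent covers [offset, offset + len) with len > 0):
 */
#include <assert.h>
#include <stdint.h>

static uint64_t overlap(uint64_t start, uint64_t end,	/* inclusive query */
			uint64_t offset, uint64_t len)	/* one dev extent */
{
	uint64_t lo = offset > start ? offset : start;
	uint64_t hi = offset + len - 1 < end ? offset + len - 1 : end;

	return hi >= lo ? hi - lo + 1 : 0;
}

int main(void)
{
	assert(overlap(10, 19, 0, 15) == 5);	/* spills in from below */
	assert(overlap(10, 19, 12, 4) == 4);	/* fully inside the range */
	assert(overlap(10, 19, 15, 100) == 5);	/* spills out above */
	assert(overlap(10, 19, 0, 100) == 10);	/* covers the whole range */
	assert(overlap(10, 19, 30, 5) == 0);	/* disjoint */
	return 0;
}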
885
886 /*
887 * find_free_dev_extent - find free space in the specified device
888 * @device: the device which we search the free space in
889 * @num_bytes: the size of the free space that we need
890 * @start: store the start of the free space.
891  * @len:	the size of the free space that we find, or the size of the max
892  * 		free space if we don't find suitable free space
893 *
894 * this uses a pretty simple search, the expectation is that it is
895 * called very infrequently and that a given device has a small number
896 * of extents
897 *
898  * @start is used to store the start of the free space if we find it. But if we
899 * don't find suitable free space, it will be used to store the start position
900 * of the max free space.
901 *
902 * @len is used to store the size of the free space that we find.
903 * But if we don't find suitable free space, it is used to store the size of
904 * the max free space.
905 */
906 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
907 u64 *start, u64 *len)
908 {
909 struct btrfs_key key;
910 struct btrfs_root *root = device->dev_root;
911 struct btrfs_dev_extent *dev_extent;
912 struct btrfs_path *path;
913 u64 hole_size;
914 u64 max_hole_start;
915 u64 max_hole_size;
916 u64 extent_end;
917 u64 search_start;
918 u64 search_end = device->total_bytes;
919 int ret;
920 int slot;
921 struct extent_buffer *l;
922
923 /* FIXME use last free of some kind */
924
925 /* we don't want to overwrite the superblock on the drive,
926 * so we make sure to start at an offset of at least 1MB
927 */
928 search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
929
930 max_hole_start = search_start;
931 max_hole_size = 0;
932 hole_size = 0;
933
934 if (search_start >= search_end) {
935 ret = -ENOSPC;
936 goto error;
937 }
938
939 path = btrfs_alloc_path();
940 if (!path) {
941 ret = -ENOMEM;
942 goto error;
943 }
944 path->reada = 2;
945
946 key.objectid = device->devid;
947 key.offset = search_start;
948 key.type = BTRFS_DEV_EXTENT_KEY;
949
950 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
951 if (ret < 0)
952 goto out;
953 if (ret > 0) {
954 ret = btrfs_previous_item(root, path, key.objectid, key.type);
955 if (ret < 0)
956 goto out;
957 }
958
959 while (1) {
960 l = path->nodes[0];
961 slot = path->slots[0];
962 if (slot >= btrfs_header_nritems(l)) {
963 ret = btrfs_next_leaf(root, path);
964 if (ret == 0)
965 continue;
966 if (ret < 0)
967 goto out;
968
969 break;
970 }
971 btrfs_item_key_to_cpu(l, &key, slot);
972
973 if (key.objectid < device->devid)
974 goto next;
975
976 if (key.objectid > device->devid)
977 break;
978
979 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
980 goto next;
981
982 if (key.offset > search_start) {
983 hole_size = key.offset - search_start;
984
985 if (hole_size > max_hole_size) {
986 max_hole_start = search_start;
987 max_hole_size = hole_size;
988 }
989
990 /*
991                          * If this free space is greater than what we need,
992 * it must be the max free space that we have found
993 * until now, so max_hole_start must point to the start
994 * of this free space and the length of this free space
995 * is stored in max_hole_size. Thus, we return
996 * max_hole_start and max_hole_size and go back to the
997 * caller.
998 */
999 if (hole_size >= num_bytes) {
1000 ret = 0;
1001 goto out;
1002 }
1003 }
1004
1005 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1006 extent_end = key.offset + btrfs_dev_extent_length(l,
1007 dev_extent);
1008 if (extent_end > search_start)
1009 search_start = extent_end;
1010 next:
1011 path->slots[0]++;
1012 cond_resched();
1013 }
1014
1015 /*
1016 * At this point, search_start should be the end of
1017 * allocated dev extents, and when shrinking the device,
1018 * search_end may be smaller than search_start.
1019 */
1020 if (search_end > search_start)
1021 hole_size = search_end - search_start;
1022
1023 if (hole_size > max_hole_size) {
1024 max_hole_start = search_start;
1025 max_hole_size = hole_size;
1026 }
1027
1028 /* See above. */
1029 if (hole_size < num_bytes)
1030 ret = -ENOSPC;
1031 else
1032 ret = 0;
1033
1034 out:
1035 btrfs_free_path(path);
1036 error:
1037 *start = max_hole_start;
1038 if (len)
1039 *len = max_hole_size;
1040 return ret;
1041 }
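
/*
 * Sketch of the hole search that find_free_dev_extent() implements: walk
 * the allocated extents in offset order, measure the hole in front of each
 * one, remember the biggest hole seen, and stop early once one is big
 * enough.  A sorted, non-overlapping array stands in for the dev extent
 * items; names and types are illustrative.
 */
#include <stdint.h>

struct model_extent { uint64_t offset; uint64_t len; };

/* returns 1 and sets *start if a hole of at least num_bytes exists */
static int find_hole(const struct model_extent *extents, int n,
		     uint64_t search_start, uint64_t total,
		     uint64_t num_bytes, uint64_t *start, uint64_t *max_len)
{
	uint64_t max_hole_start = search_start, max_hole_size = 0;

	for (int i = 0; i < n; i++) {
		if (extents[i].offset > search_start) {
			uint64_t hole = extents[i].offset - search_start;

			if (hole > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole;
			}
			if (hole >= num_bytes)
				goto done;	/* big enough, stop early */
		}
		if (extents[i].offset + extents[i].len > search_start)
			search_start = extents[i].offset + extents[i].len;
	}
	/* trailing hole between the last extent and the end of the device */
	if (total > search_start && total - search_start > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = total - search_start;
	}
done:
	*start = max_hole_start;
	*max_len = max_hole_size;
	return max_hole_size >= num_bytes;
}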
1042
1043 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1044 struct btrfs_device *device,
1045 u64 start)
1046 {
1047 int ret;
1048 struct btrfs_path *path;
1049 struct btrfs_root *root = device->dev_root;
1050 struct btrfs_key key;
1051 struct btrfs_key found_key;
1052 struct extent_buffer *leaf = NULL;
1053 struct btrfs_dev_extent *extent = NULL;
1054
1055 path = btrfs_alloc_path();
1056 if (!path)
1057 return -ENOMEM;
1058
1059 key.objectid = device->devid;
1060 key.offset = start;
1061 key.type = BTRFS_DEV_EXTENT_KEY;
1062 again:
1063 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1064 if (ret > 0) {
1065 ret = btrfs_previous_item(root, path, key.objectid,
1066 BTRFS_DEV_EXTENT_KEY);
1067 if (ret)
1068 goto out;
1069 leaf = path->nodes[0];
1070 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1071 extent = btrfs_item_ptr(leaf, path->slots[0],
1072 struct btrfs_dev_extent);
1073 BUG_ON(found_key.offset > start || found_key.offset +
1074 btrfs_dev_extent_length(leaf, extent) < start);
1075 key = found_key;
1076 btrfs_release_path(path);
1077 goto again;
1078 } else if (ret == 0) {
1079 leaf = path->nodes[0];
1080 extent = btrfs_item_ptr(leaf, path->slots[0],
1081 struct btrfs_dev_extent);
1082 } else {
1083 btrfs_error(root->fs_info, ret, "Slot search failed");
1084 goto out;
1085 }
1086
1087 if (device->bytes_used > 0) {
1088 u64 len = btrfs_dev_extent_length(leaf, extent);
1089 device->bytes_used -= len;
1090 spin_lock(&root->fs_info->free_chunk_lock);
1091 root->fs_info->free_chunk_space += len;
1092 spin_unlock(&root->fs_info->free_chunk_lock);
1093 }
1094 ret = btrfs_del_item(trans, root, path);
1095 if (ret) {
1096 btrfs_error(root->fs_info, ret,
1097 "Failed to remove dev extent item");
1098 }
1099 out:
1100 btrfs_free_path(path);
1101 return ret;
1102 }
1103
1104 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1105 struct btrfs_device *device,
1106 u64 chunk_tree, u64 chunk_objectid,
1107 u64 chunk_offset, u64 start, u64 num_bytes)
1108 {
1109 int ret;
1110 struct btrfs_path *path;
1111 struct btrfs_root *root = device->dev_root;
1112 struct btrfs_dev_extent *extent;
1113 struct extent_buffer *leaf;
1114 struct btrfs_key key;
1115
1116 WARN_ON(!device->in_fs_metadata);
1117 path = btrfs_alloc_path();
1118 if (!path)
1119 return -ENOMEM;
1120
1121 key.objectid = device->devid;
1122 key.offset = start;
1123 key.type = BTRFS_DEV_EXTENT_KEY;
1124 ret = btrfs_insert_empty_item(trans, root, path, &key,
1125 sizeof(*extent));
1126 if (ret)
1127 goto out;
1128
1129 leaf = path->nodes[0];
1130 extent = btrfs_item_ptr(leaf, path->slots[0],
1131 struct btrfs_dev_extent);
1132 btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1133 btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1134 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1135
1136 write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1137 (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
1138 BTRFS_UUID_SIZE);
1139
1140 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1141 btrfs_mark_buffer_dirty(leaf);
1142 out:
1143 btrfs_free_path(path);
1144 return ret;
1145 }
1146
1147 static noinline int find_next_chunk(struct btrfs_root *root,
1148 u64 objectid, u64 *offset)
1149 {
1150 struct btrfs_path *path;
1151 int ret;
1152 struct btrfs_key key;
1153 struct btrfs_chunk *chunk;
1154 struct btrfs_key found_key;
1155
1156 path = btrfs_alloc_path();
1157 if (!path)
1158 return -ENOMEM;
1159
1160 key.objectid = objectid;
1161 key.offset = (u64)-1;
1162 key.type = BTRFS_CHUNK_ITEM_KEY;
1163
1164 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1165 if (ret < 0)
1166 goto error;
1167
1168 BUG_ON(ret == 0); /* Corruption */
1169
1170 ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
1171 if (ret) {
1172 *offset = 0;
1173 } else {
1174 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1175 path->slots[0]);
1176 if (found_key.objectid != objectid)
1177 *offset = 0;
1178 else {
1179 chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
1180 struct btrfs_chunk);
1181 *offset = found_key.offset +
1182 btrfs_chunk_length(path->nodes[0], chunk);
1183 }
1184 }
1185 ret = 0;
1186 error:
1187 btrfs_free_path(path);
1188 return ret;
1189 }
1190
1191 static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
1192 {
1193 int ret;
1194 struct btrfs_key key;
1195 struct btrfs_key found_key;
1196 struct btrfs_path *path;
1197
1198 root = root->fs_info->chunk_root;
1199
1200 path = btrfs_alloc_path();
1201 if (!path)
1202 return -ENOMEM;
1203
1204 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1205 key.type = BTRFS_DEV_ITEM_KEY;
1206 key.offset = (u64)-1;
1207
1208 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1209 if (ret < 0)
1210 goto error;
1211
1212 BUG_ON(ret == 0); /* Corruption */
1213
1214 ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
1215 BTRFS_DEV_ITEM_KEY);
1216 if (ret) {
1217 *objectid = 1;
1218 } else {
1219 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1220 path->slots[0]);
1221 *objectid = found_key.offset + 1;
1222 }
1223 ret = 0;
1224 error:
1225 btrfs_free_path(path);
1226 return ret;
1227 }
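
/*
 * find_next_chunk() and find_next_devid() share an idiom: search for the
 * largest possible key ((u64)-1), which always lands just past the last
 * existing item, then step back one item to read the current maximum.
 * The same logic on a plain sorted array:
 */
#include <assert.h>
#include <stdint.h>

static uint64_t next_devid(const uint64_t *devids, int n)
{
	/*
	 * "Searching" for UINT64_MAX puts the insertion point at n; the
	 * previous item, if any, holds the current maximum devid.
	 */
	if (n == 0)
		return 1;	/* matches *objectid = 1 above */
	return devids[n - 1] + 1;
}

int main(void)
{
	uint64_t ids[] = { 1, 2, 5 };

	assert(next_devid(ids, 3) == 6);	/* holes are not reused */
	assert(next_devid(ids, 0) == 1);
	return 0;
}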
1228
1229 /*
1230 * the device information is stored in the chunk root
1231 * the btrfs_device struct should be fully filled in
1232 */
1233 int btrfs_add_device(struct btrfs_trans_handle *trans,
1234 struct btrfs_root *root,
1235 struct btrfs_device *device)
1236 {
1237 int ret;
1238 struct btrfs_path *path;
1239 struct btrfs_dev_item *dev_item;
1240 struct extent_buffer *leaf;
1241 struct btrfs_key key;
1242 unsigned long ptr;
1243
1244 root = root->fs_info->chunk_root;
1245
1246 path = btrfs_alloc_path();
1247 if (!path)
1248 return -ENOMEM;
1249
1250 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1251 key.type = BTRFS_DEV_ITEM_KEY;
1252 key.offset = device->devid;
1253
1254 ret = btrfs_insert_empty_item(trans, root, path, &key,
1255 sizeof(*dev_item));
1256 if (ret)
1257 goto out;
1258
1259 leaf = path->nodes[0];
1260 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1261
1262 btrfs_set_device_id(leaf, dev_item, device->devid);
1263 btrfs_set_device_generation(leaf, dev_item, 0);
1264 btrfs_set_device_type(leaf, dev_item, device->type);
1265 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1266 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1267 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1268 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1269 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1270 btrfs_set_device_group(leaf, dev_item, 0);
1271 btrfs_set_device_seek_speed(leaf, dev_item, 0);
1272 btrfs_set_device_bandwidth(leaf, dev_item, 0);
1273 btrfs_set_device_start_offset(leaf, dev_item, 0);
1274
1275 ptr = (unsigned long)btrfs_device_uuid(dev_item);
1276 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1277 ptr = (unsigned long)btrfs_device_fsid(dev_item);
1278 write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1279 btrfs_mark_buffer_dirty(leaf);
1280
1281 ret = 0;
1282 out:
1283 btrfs_free_path(path);
1284 return ret;
1285 }
1286
1287 static int btrfs_rm_dev_item(struct btrfs_root *root,
1288 struct btrfs_device *device)
1289 {
1290 int ret;
1291 struct btrfs_path *path;
1292 struct btrfs_key key;
1293 struct btrfs_trans_handle *trans;
1294
1295 root = root->fs_info->chunk_root;
1296
1297 path = btrfs_alloc_path();
1298 if (!path)
1299 return -ENOMEM;
1300
1301 trans = btrfs_start_transaction(root, 0);
1302 if (IS_ERR(trans)) {
1303 btrfs_free_path(path);
1304 return PTR_ERR(trans);
1305 }
1306 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1307 key.type = BTRFS_DEV_ITEM_KEY;
1308 key.offset = device->devid;
1309 lock_chunks(root);
1310
1311 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1312 if (ret < 0)
1313 goto out;
1314
1315 if (ret > 0) {
1316 ret = -ENOENT;
1317 goto out;
1318 }
1319
1320 ret = btrfs_del_item(trans, root, path);
1321 if (ret)
1322 goto out;
1323 out:
1324 btrfs_free_path(path);
1325 unlock_chunks(root);
1326 btrfs_commit_transaction(trans, root);
1327 return ret;
1328 }
1329
1330 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1331 {
1332 struct btrfs_device *device;
1333 struct btrfs_device *next_device;
1334 struct block_device *bdev;
1335 struct buffer_head *bh = NULL;
1336 struct btrfs_super_block *disk_super;
1337 struct btrfs_fs_devices *cur_devices;
1338 u64 all_avail;
1339 u64 devid;
1340 u64 num_devices;
1341 u8 *dev_uuid;
1342 int ret = 0;
1343 bool clear_super = false;
1344
1345 mutex_lock(&uuid_mutex);
1346
1347 all_avail = root->fs_info->avail_data_alloc_bits |
1348 root->fs_info->avail_system_alloc_bits |
1349 root->fs_info->avail_metadata_alloc_bits;
1350
1351 if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
1352 root->fs_info->fs_devices->num_devices <= 4) {
1353 printk(KERN_ERR "btrfs: unable to go below four devices "
1354 "on raid10\n");
1355 ret = -EINVAL;
1356 goto out;
1357 }
1358
1359 if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
1360 root->fs_info->fs_devices->num_devices <= 2) {
1361 printk(KERN_ERR "btrfs: unable to go below two "
1362 "devices on raid1\n");
1363 ret = -EINVAL;
1364 goto out;
1365 }
1366
1367 if (strcmp(device_path, "missing") == 0) {
1368 struct list_head *devices;
1369 struct btrfs_device *tmp;
1370
1371 device = NULL;
1372 devices = &root->fs_info->fs_devices->devices;
1373 /*
1374 * It is safe to read the devices since the volume_mutex
1375 * is held.
1376 */
1377 list_for_each_entry(tmp, devices, dev_list) {
1378 if (tmp->in_fs_metadata && !tmp->bdev) {
1379 device = tmp;
1380 break;
1381 }
1382 }
1383 bdev = NULL;
1384 bh = NULL;
1385 disk_super = NULL;
1386 if (!device) {
1387 printk(KERN_ERR "btrfs: no missing devices found to "
1388 "remove\n");
1389 goto out;
1390 }
1391 } else {
1392 ret = btrfs_get_bdev_and_sb(device_path,
1393 FMODE_READ | FMODE_EXCL,
1394 root->fs_info->bdev_holder, 0,
1395 &bdev, &bh);
1396 if (ret)
1397 goto out;
1398 disk_super = (struct btrfs_super_block *)bh->b_data;
1399 devid = btrfs_stack_device_id(&disk_super->dev_item);
1400 dev_uuid = disk_super->dev_item.uuid;
1401 device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1402 disk_super->fsid);
1403 if (!device) {
1404 ret = -ENOENT;
1405 goto error_brelse;
1406 }
1407 }
1408
1409 if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1410 printk(KERN_ERR "btrfs: unable to remove the only writeable "
1411 "device\n");
1412 ret = -EINVAL;
1413 goto error_brelse;
1414 }
1415
1416 if (device->writeable) {
1417 lock_chunks(root);
1418 list_del_init(&device->dev_alloc_list);
1419 unlock_chunks(root);
1420 root->fs_info->fs_devices->rw_devices--;
1421 clear_super = true;
1422 }
1423
1424 ret = btrfs_shrink_device(device, 0);
1425 if (ret)
1426 goto error_undo;
1427
1428 ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1429 if (ret)
1430 goto error_undo;
1431
1432 spin_lock(&root->fs_info->free_chunk_lock);
1433 root->fs_info->free_chunk_space = device->total_bytes -
1434 device->bytes_used;
1435 spin_unlock(&root->fs_info->free_chunk_lock);
1436
1437 device->in_fs_metadata = 0;
1438 btrfs_scrub_cancel_dev(root->fs_info, device);
1439
1440 /*
1441 * the device list mutex makes sure that we don't change
1442 * the device list while someone else is writing out all
1443 * the device supers.
1444 */
1445
1446 cur_devices = device->fs_devices;
1447 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1448 list_del_rcu(&device->dev_list);
1449
1450 device->fs_devices->num_devices--;
1451 device->fs_devices->total_devices--;
1452
1453 if (device->missing)
1454 root->fs_info->fs_devices->missing_devices--;
1455
1456 next_device = list_entry(root->fs_info->fs_devices->devices.next,
1457 struct btrfs_device, dev_list);
1458 if (device->bdev == root->fs_info->sb->s_bdev)
1459 root->fs_info->sb->s_bdev = next_device->bdev;
1460 if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1461 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1462
1463 if (device->bdev)
1464 device->fs_devices->open_devices--;
1465
1466 call_rcu(&device->rcu, free_device);
1467 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1468
1469 num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1470 btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1471
1472 if (cur_devices->open_devices == 0) {
1473 struct btrfs_fs_devices *fs_devices;
1474 fs_devices = root->fs_info->fs_devices;
1475 while (fs_devices) {
1476 if (fs_devices->seed == cur_devices)
1477 break;
1478 fs_devices = fs_devices->seed;
1479 }
1480 fs_devices->seed = cur_devices->seed;
1481 cur_devices->seed = NULL;
1482 lock_chunks(root);
1483 __btrfs_close_devices(cur_devices);
1484 unlock_chunks(root);
1485 free_fs_devices(cur_devices);
1486 }
1487
1488 root->fs_info->num_tolerated_disk_barrier_failures =
1489 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1490
1491 /*
1492 * at this point, the device is zero sized. We want to
1493 * remove it from the devices list and zero out the old super
1494 */
1495 if (clear_super && disk_super) {
1496 /* make sure this device isn't detected as part of
1497 * the FS anymore
1498 */
1499 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1500 set_buffer_dirty(bh);
1501 sync_dirty_buffer(bh);
1502 }
1503
1504 ret = 0;
1505
1506 error_brelse:
1507 brelse(bh);
1508 error_close:
1509 if (bdev)
1510 blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1511 out:
1512 mutex_unlock(&uuid_mutex);
1513 return ret;
1514 error_undo:
1515 if (device->writeable) {
1516 lock_chunks(root);
1517 list_add(&device->dev_alloc_list,
1518 &root->fs_info->fs_devices->alloc_list);
1519 unlock_chunks(root);
1520 root->fs_info->fs_devices->rw_devices++;
1521 }
1522 goto error_brelse;
1523 }
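
/*
 * The profile checks at the top of btrfs_rm_device() condensed into a
 * single helper.  The thresholds mirror the code above: the comparison is
 * made before the removal, so "<= 4" means a raid10 filesystem may not be
 * taken below four devices.  The flag values here are illustrative, not
 * the real BTRFS_BLOCK_GROUP_* bits.
 */
#include <errno.h>
#include <stdint.h>

#define MODEL_RAID1	(1ULL << 0)
#define MODEL_RAID10	(1ULL << 1)

static int model_can_remove(uint64_t avail_profiles, uint64_t num_devices)
{
	if ((avail_profiles & MODEL_RAID10) && num_devices <= 4)
		return -EINVAL;		/* would leave raid10 with < 4 */
	if ((avail_profiles & MODEL_RAID1) && num_devices <= 2)
		return -EINVAL;		/* would leave raid1 with < 2 */
	return 0;
}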
1524
1525 int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
1526 struct btrfs_device **device)
1527 {
1528 int ret = 0;
1529 struct btrfs_super_block *disk_super;
1530 u64 devid;
1531 u8 *dev_uuid;
1532 struct block_device *bdev;
1533 struct buffer_head *bh;
1534
1535 *device = NULL;
1536 ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
1537 root->fs_info->bdev_holder, 0, &bdev, &bh);
1538 if (ret)
1539 return ret;
1540 disk_super = (struct btrfs_super_block *)bh->b_data;
1541 devid = btrfs_stack_device_id(&disk_super->dev_item);
1542 dev_uuid = disk_super->dev_item.uuid;
1543 *device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1544 disk_super->fsid);
1545 brelse(bh);
1546 if (!*device)
1547 ret = -ENOENT;
1548 blkdev_put(bdev, FMODE_READ);
1549 return ret;
1550 }
1551
1552 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
1553 char *device_path,
1554 struct btrfs_device **device)
1555 {
1556 *device = NULL;
1557 if (strcmp(device_path, "missing") == 0) {
1558 struct list_head *devices;
1559 struct btrfs_device *tmp;
1560
1561 devices = &root->fs_info->fs_devices->devices;
1562 /*
1563 * It is safe to read the devices since the volume_mutex
1564 * is held by the caller.
1565 */
1566 list_for_each_entry(tmp, devices, dev_list) {
1567 if (tmp->in_fs_metadata && !tmp->bdev) {
1568 *device = tmp;
1569 break;
1570 }
1571 }
1572
1573 if (!*device) {
1574 pr_err("btrfs: no missing device found\n");
1575 return -ENOENT;
1576 }
1577
1578 return 0;
1579 } else {
1580 return btrfs_find_device_by_path(root, device_path, device);
1581 }
1582 }
1583
1584 /*
1585  * does all the dirty work required for changing the file system's UUID.
1586 */
1587 static int btrfs_prepare_sprout(struct btrfs_root *root)
1588 {
1589 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1590 struct btrfs_fs_devices *old_devices;
1591 struct btrfs_fs_devices *seed_devices;
1592 struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1593 struct btrfs_device *device;
1594 u64 super_flags;
1595
1596 BUG_ON(!mutex_is_locked(&uuid_mutex));
1597 if (!fs_devices->seeding)
1598 return -EINVAL;
1599
1600 seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1601 if (!seed_devices)
1602 return -ENOMEM;
1603
1604 old_devices = clone_fs_devices(fs_devices);
1605 if (IS_ERR(old_devices)) {
1606 kfree(seed_devices);
1607 return PTR_ERR(old_devices);
1608 }
1609
1610 list_add(&old_devices->list, &fs_uuids);
1611
1612 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1613 seed_devices->opened = 1;
1614 INIT_LIST_HEAD(&seed_devices->devices);
1615 INIT_LIST_HEAD(&seed_devices->alloc_list);
1616 mutex_init(&seed_devices->device_list_mutex);
1617
1618 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1619 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1620 synchronize_rcu);
1621 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1622
1623 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1624 list_for_each_entry(device, &seed_devices->devices, dev_list) {
1625 device->fs_devices = seed_devices;
1626 }
1627
1628 fs_devices->seeding = 0;
1629 fs_devices->num_devices = 0;
1630 fs_devices->open_devices = 0;
1631 fs_devices->total_devices = 0;
1632 fs_devices->seed = seed_devices;
1633
1634 generate_random_uuid(fs_devices->fsid);
1635 memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1636 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1637 super_flags = btrfs_super_flags(disk_super) &
1638 ~BTRFS_SUPER_FLAG_SEEDING;
1639 btrfs_set_super_flags(disk_super, super_flags);
1640
1641 return 0;
1642 }
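
/*
 * Net effect of btrfs_prepare_sprout(): the currently open devices are
 * moved wholesale onto a freshly allocated fs_devices that becomes the
 * read-only "seed"; the live fs_devices keeps its struct but restarts
 * with zero devices and a newly generated fsid, and the seed is chained
 * off it via fs_devices->seed.  Clearing BTRFS_SUPER_FLAG_SEEDING in the
 * super flags is what lets the sprouted filesystem be written from now on.
 */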
1643
1644 /*
1645  * store the expected generation for seed devices in device items.
1646 */
1647 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1648 struct btrfs_root *root)
1649 {
1650 struct btrfs_path *path;
1651 struct extent_buffer *leaf;
1652 struct btrfs_dev_item *dev_item;
1653 struct btrfs_device *device;
1654 struct btrfs_key key;
1655 u8 fs_uuid[BTRFS_UUID_SIZE];
1656 u8 dev_uuid[BTRFS_UUID_SIZE];
1657 u64 devid;
1658 int ret;
1659
1660 path = btrfs_alloc_path();
1661 if (!path)
1662 return -ENOMEM;
1663
1664 root = root->fs_info->chunk_root;
1665 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1666 key.offset = 0;
1667 key.type = BTRFS_DEV_ITEM_KEY;
1668
1669 while (1) {
1670 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1671 if (ret < 0)
1672 goto error;
1673
1674 leaf = path->nodes[0];
1675 next_slot:
1676 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1677 ret = btrfs_next_leaf(root, path);
1678 if (ret > 0)
1679 break;
1680 if (ret < 0)
1681 goto error;
1682 leaf = path->nodes[0];
1683 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1684 btrfs_release_path(path);
1685 continue;
1686 }
1687
1688 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1689 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1690 key.type != BTRFS_DEV_ITEM_KEY)
1691 break;
1692
1693 dev_item = btrfs_item_ptr(leaf, path->slots[0],
1694 struct btrfs_dev_item);
1695 devid = btrfs_device_id(leaf, dev_item);
1696 read_extent_buffer(leaf, dev_uuid,
1697 (unsigned long)btrfs_device_uuid(dev_item),
1698 BTRFS_UUID_SIZE);
1699 read_extent_buffer(leaf, fs_uuid,
1700 (unsigned long)btrfs_device_fsid(dev_item),
1701 BTRFS_UUID_SIZE);
1702 device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1703 fs_uuid);
1704 BUG_ON(!device); /* Logic error */
1705
1706 if (device->fs_devices->seeding) {
1707 btrfs_set_device_generation(leaf, dev_item,
1708 device->generation);
1709 btrfs_mark_buffer_dirty(leaf);
1710 }
1711
1712 path->slots[0]++;
1713 goto next_slot;
1714 }
1715 ret = 0;
1716 error:
1717 btrfs_free_path(path);
1718 return ret;
1719 }
1720
1721 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1722 {
1723 struct request_queue *q;
1724 struct btrfs_trans_handle *trans;
1725 struct btrfs_device *device;
1726 struct block_device *bdev;
1727 struct list_head *devices;
1728 struct super_block *sb = root->fs_info->sb;
1729 struct rcu_string *name;
1730 u64 total_bytes;
1731 int seeding_dev = 0;
1732 int ret = 0;
1733
1734 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1735 return -EROFS;
1736
1737 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1738 root->fs_info->bdev_holder);
1739 if (IS_ERR(bdev))
1740 return PTR_ERR(bdev);
1741
1742 if (root->fs_info->fs_devices->seeding) {
1743 seeding_dev = 1;
1744 down_write(&sb->s_umount);
1745 mutex_lock(&uuid_mutex);
1746 }
1747
1748 filemap_write_and_wait(bdev->bd_inode->i_mapping);
1749
1750 devices = &root->fs_info->fs_devices->devices;
1751
1752 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1753 list_for_each_entry(device, devices, dev_list) {
1754 if (device->bdev == bdev) {
1755 ret = -EEXIST;
1756 mutex_unlock(
1757 &root->fs_info->fs_devices->device_list_mutex);
1758 goto error;
1759 }
1760 }
1761 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1762
1763 device = kzalloc(sizeof(*device), GFP_NOFS);
1764 if (!device) {
1765 /* we can safely leave the fs_devices entry around */
1766 ret = -ENOMEM;
1767 goto error;
1768 }
1769
1770 name = rcu_string_strdup(device_path, GFP_NOFS);
1771 if (!name) {
1772 kfree(device);
1773 ret = -ENOMEM;
1774 goto error;
1775 }
1776 rcu_assign_pointer(device->name, name);
1777
1778 ret = find_next_devid(root, &device->devid);
1779 if (ret) {
1780 rcu_string_free(device->name);
1781 kfree(device);
1782 goto error;
1783 }
1784
1785 trans = btrfs_start_transaction(root, 0);
1786 if (IS_ERR(trans)) {
1787 rcu_string_free(device->name);
1788 kfree(device);
1789 ret = PTR_ERR(trans);
1790 goto error;
1791 }
1792
1793 lock_chunks(root);
1794
1795 q = bdev_get_queue(bdev);
1796 if (blk_queue_discard(q))
1797 device->can_discard = 1;
1798 device->writeable = 1;
1799 device->work.func = pending_bios_fn;
1800 generate_random_uuid(device->uuid);
1801 spin_lock_init(&device->io_lock);
1802 device->generation = trans->transid;
1803 device->io_width = root->sectorsize;
1804 device->io_align = root->sectorsize;
1805 device->sector_size = root->sectorsize;
1806 device->total_bytes = i_size_read(bdev->bd_inode);
1807 device->disk_total_bytes = device->total_bytes;
1808 device->dev_root = root->fs_info->dev_root;
1809 device->bdev = bdev;
1810 device->in_fs_metadata = 1;
1811 device->mode = FMODE_EXCL;
1812 set_blocksize(device->bdev, 4096);
1813
1814 if (seeding_dev) {
1815 sb->s_flags &= ~MS_RDONLY;
1816 ret = btrfs_prepare_sprout(root);
1817 BUG_ON(ret); /* -ENOMEM */
1818 }
1819
1820 device->fs_devices = root->fs_info->fs_devices;
1821
1822 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1823 list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
1824 list_add(&device->dev_alloc_list,
1825 &root->fs_info->fs_devices->alloc_list);
1826 root->fs_info->fs_devices->num_devices++;
1827 root->fs_info->fs_devices->open_devices++;
1828 root->fs_info->fs_devices->rw_devices++;
1829 root->fs_info->fs_devices->total_devices++;
1830 if (device->can_discard)
1831 root->fs_info->fs_devices->num_can_discard++;
1832 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
1833
1834 spin_lock(&root->fs_info->free_chunk_lock);
1835 root->fs_info->free_chunk_space += device->total_bytes;
1836 spin_unlock(&root->fs_info->free_chunk_lock);
1837
1838 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
1839 root->fs_info->fs_devices->rotating = 1;
1840
1841 total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
1842 btrfs_set_super_total_bytes(root->fs_info->super_copy,
1843 total_bytes + device->total_bytes);
1844
1845 total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
1846 btrfs_set_super_num_devices(root->fs_info->super_copy,
1847 total_bytes + 1);
1848 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1849
1850 if (seeding_dev) {
1851 ret = init_first_rw_device(trans, root, device);
1852 if (ret) {
1853 btrfs_abort_transaction(trans, root, ret);
1854 goto error_trans;
1855 }
1856 ret = btrfs_finish_sprout(trans, root);
1857 if (ret) {
1858 btrfs_abort_transaction(trans, root, ret);
1859 goto error_trans;
1860 }
1861 } else {
1862 ret = btrfs_add_device(trans, root, device);
1863 if (ret) {
1864 btrfs_abort_transaction(trans, root, ret);
1865 goto error_trans;
1866 }
1867 }
1868
1869 /*
1870 * we've got more storage, clear any full flags on the space
1871 * infos
1872 */
1873 btrfs_clear_space_info_full(root->fs_info);
1874
1875 unlock_chunks(root);
1876 root->fs_info->num_tolerated_disk_barrier_failures =
1877 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1878 ret = btrfs_commit_transaction(trans, root);
1879
1880 if (seeding_dev) {
1881 mutex_unlock(&uuid_mutex);
1882 up_write(&sb->s_umount);
1883
1884 if (ret) /* transaction commit */
1885 return ret;
1886
1887 ret = btrfs_relocate_sys_chunks(root);
1888 if (ret < 0)
1889 btrfs_error(root->fs_info, ret,
1890 "Failed to relocate sys chunks after "
1891 "device initialization. This can be fixed "
1892 "using the \"btrfs balance\" command.");
1893 trans = btrfs_attach_transaction(root);
1894 if (IS_ERR(trans)) {
1895 if (PTR_ERR(trans) == -ENOENT)
1896 return 0;
1897 return PTR_ERR(trans);
1898 }
1899 ret = btrfs_commit_transaction(trans, root);
1900 }
1901
1902 return ret;
1903
1904 error_trans:
1905 unlock_chunks(root);
1906 btrfs_end_transaction(trans, root);
1907 rcu_string_free(device->name);
1908 kfree(device);
1909 error:
1910 blkdev_put(bdev, FMODE_EXCL);
1911 if (seeding_dev) {
1912 mutex_unlock(&uuid_mutex);
1913 up_write(&sb->s_umount);
1914 }
1915 return ret;
1916 }
1917
1918 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
1919 struct btrfs_device *device)
1920 {
1921 int ret;
1922 struct btrfs_path *path;
1923 struct btrfs_root *root;
1924 struct btrfs_dev_item *dev_item;
1925 struct extent_buffer *leaf;
1926 struct btrfs_key key;
1927
1928 root = device->dev_root->fs_info->chunk_root;
1929
1930 path = btrfs_alloc_path();
1931 if (!path)
1932 return -ENOMEM;
1933
1934 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1935 key.type = BTRFS_DEV_ITEM_KEY;
1936 key.offset = device->devid;
1937
1938 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1939 if (ret < 0)
1940 goto out;
1941
1942 if (ret > 0) {
1943 ret = -ENOENT;
1944 goto out;
1945 }
1946
1947 leaf = path->nodes[0];
1948 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1949
1950 btrfs_set_device_id(leaf, dev_item, device->devid);
1951 btrfs_set_device_type(leaf, dev_item, device->type);
1952 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1953 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1954 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1955 btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
1956 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1957 btrfs_mark_buffer_dirty(leaf);
1958
1959 out:
1960 btrfs_free_path(path);
1961 return ret;
1962 }
1963
1964 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
1965 struct btrfs_device *device, u64 new_size)
1966 {
1967 struct btrfs_super_block *super_copy =
1968 device->dev_root->fs_info->super_copy;
1969 u64 old_total = btrfs_super_total_bytes(super_copy);
1970 u64 diff = new_size - device->total_bytes;
1971
1972 if (!device->writeable)
1973 return -EACCES;
1974 if (new_size <= device->total_bytes)
1975 return -EINVAL;
1976
1977 btrfs_set_super_total_bytes(super_copy, old_total + diff);
1978 device->fs_devices->total_rw_bytes += diff;
1979
1980 device->total_bytes = new_size;
1981 device->disk_total_bytes = new_size;
1982 btrfs_clear_space_info_full(device->dev_root->fs_info);
1983
1984 return btrfs_update_device(trans, device);
1985 }
1986
1987 int btrfs_grow_device(struct btrfs_trans_handle *trans,
1988 struct btrfs_device *device, u64 new_size)
1989 {
1990 int ret;
1991 lock_chunks(device->dev_root);
1992 ret = __btrfs_grow_device(trans, device, new_size);
1993 unlock_chunks(device->dev_root);
1994 return ret;
1995 }
1996
1997 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1998 struct btrfs_root *root,
1999 u64 chunk_tree, u64 chunk_objectid,
2000 u64 chunk_offset)
2001 {
2002 int ret;
2003 struct btrfs_path *path;
2004 struct btrfs_key key;
2005
2006 root = root->fs_info->chunk_root;
2007 path = btrfs_alloc_path();
2008 if (!path)
2009 return -ENOMEM;
2010
2011 key.objectid = chunk_objectid;
2012 key.offset = chunk_offset;
2013 key.type = BTRFS_CHUNK_ITEM_KEY;
2014
2015 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2016 if (ret < 0)
2017 goto out;
2018 else if (ret > 0) { /* Logic error or corruption */
2019 btrfs_error(root->fs_info, -ENOENT,
2020 "Failed lookup while freeing chunk.");
2021 ret = -ENOENT;
2022 goto out;
2023 }
2024
2025 ret = btrfs_del_item(trans, root, path);
2026 if (ret < 0)
2027 btrfs_error(root->fs_info, ret,
2028 "Failed to delete chunk item.");
2029 out:
2030 btrfs_free_path(path);
2031 return ret;
2032 }
2033
2034 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
2035 			       u64 chunk_offset)
2036 {
2037 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2038 struct btrfs_disk_key *disk_key;
2039 struct btrfs_chunk *chunk;
2040 u8 *ptr;
2041 int ret = 0;
2042 u32 num_stripes;
2043 u32 array_size;
2044 u32 len = 0;
2045 u32 cur;
2046 struct btrfs_key key;
2047
2048 array_size = btrfs_super_sys_array_size(super_copy);
2049
2050 ptr = super_copy->sys_chunk_array;
2051 cur = 0;
2052
2053 while (cur < array_size) {
2054 disk_key = (struct btrfs_disk_key *)ptr;
2055 btrfs_disk_key_to_cpu(&key, disk_key);
2056
2057 len = sizeof(*disk_key);
2058
2059 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2060 chunk = (struct btrfs_chunk *)(ptr + len);
2061 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2062 len += btrfs_chunk_item_size(num_stripes);
2063 } else {
2064 ret = -EIO;
2065 break;
2066 }
2067 if (key.objectid == chunk_objectid &&
2068 key.offset == chunk_offset) {
2069 memmove(ptr, ptr + len, array_size - (cur + len));
2070 array_size -= len;
2071 btrfs_set_super_sys_array_size(super_copy, array_size);
2072 } else {
2073 ptr += len;
2074 cur += len;
2075 }
2076 }
2077 return ret;
2078 }
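
/*
 * For reference, the sys_chunk_array walked above is a packed sequence of
 * (disk key, chunk item) pairs:
 *
 *	| btrfs_disk_key | btrfs_chunk + stripes | btrfs_disk_key | ...
 *
 * Each entry occupies sizeof(*disk_key) plus
 * btrfs_chunk_item_size(num_stripes), so deleting an entry is a plain
 * memmove() of the tail followed by shrinking sys_array_size.
 */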
2079
2080 static int btrfs_relocate_chunk(struct btrfs_root *root,
2081 u64 chunk_tree, u64 chunk_objectid,
2082 u64 chunk_offset)
2083 {
2084 struct extent_map_tree *em_tree;
2085 struct btrfs_root *extent_root;
2086 struct btrfs_trans_handle *trans;
2087 struct extent_map *em;
2088 struct map_lookup *map;
2089 int ret;
2090 int i;
2091
2092 root = root->fs_info->chunk_root;
2093 extent_root = root->fs_info->extent_root;
2094 em_tree = &root->fs_info->mapping_tree.map_tree;
2095
2096 ret = btrfs_can_relocate(extent_root, chunk_offset);
2097 if (ret)
2098 return -ENOSPC;
2099
2100 /* step one, relocate all the extents inside this chunk */
2101 ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2102 if (ret)
2103 return ret;
2104
2105 trans = btrfs_start_transaction(root, 0);
2106 BUG_ON(IS_ERR(trans));
2107
2108 lock_chunks(root);
2109
2110 /*
2111 * step two, delete the device extents and the
2112 * chunk tree entries
2113 */
2114 read_lock(&em_tree->lock);
2115 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2116 read_unlock(&em_tree->lock);
2117
2118 BUG_ON(!em || em->start > chunk_offset ||
2119 em->start + em->len < chunk_offset);
2120 map = (struct map_lookup *)em->bdev;
2121
2122 for (i = 0; i < map->num_stripes; i++) {
2123 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2124 map->stripes[i].physical);
2125 BUG_ON(ret);
2126
2127 if (map->stripes[i].dev) {
2128 ret = btrfs_update_device(trans, map->stripes[i].dev);
2129 BUG_ON(ret);
2130 }
2131 }
2132 ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2133 chunk_offset);
2134
2135 BUG_ON(ret);
2136
2137 trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2138
2139 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2140 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2141 BUG_ON(ret);
2142 }
2143
2144 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2145 BUG_ON(ret);
2146
2147 write_lock(&em_tree->lock);
2148 remove_extent_mapping(em_tree, em);
2149 write_unlock(&em_tree->lock);
2150
2151 kfree(map);
2152 em->bdev = NULL;
2153
2154 /* once for the tree */
2155 free_extent_map(em);
2156 /* once for us */
2157 free_extent_map(em);
2158
2159 unlock_chunks(root);
2160 btrfs_end_transaction(trans, root);
2161 return 0;
2162 }
2163
2164 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2165 {
2166 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2167 struct btrfs_path *path;
2168 struct extent_buffer *leaf;
2169 struct btrfs_chunk *chunk;
2170 struct btrfs_key key;
2171 struct btrfs_key found_key;
2172 u64 chunk_tree = chunk_root->root_key.objectid;
2173 u64 chunk_type;
2174 bool retried = false;
2175 int failed = 0;
2176 int ret;
2177
2178 path = btrfs_alloc_path();
2179 if (!path)
2180 return -ENOMEM;
2181
2182 again:
2183 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2184 key.offset = (u64)-1;
2185 key.type = BTRFS_CHUNK_ITEM_KEY;
2186
2187 while (1) {
2188 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2189 if (ret < 0)
2190 goto error;
2191 BUG_ON(ret == 0); /* Corruption */
2192
2193 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2194 key.type);
2195 if (ret < 0)
2196 goto error;
2197 if (ret > 0)
2198 break;
2199
2200 leaf = path->nodes[0];
2201 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2202
2203 chunk = btrfs_item_ptr(leaf, path->slots[0],
2204 struct btrfs_chunk);
2205 chunk_type = btrfs_chunk_type(leaf, chunk);
2206 btrfs_release_path(path);
2207
2208 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2209 ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2210 found_key.objectid,
2211 found_key.offset);
2212 if (ret == -ENOSPC)
2213 failed++;
2214 else if (ret)
2215 BUG();
2216 }
2217
2218 if (found_key.offset == 0)
2219 break;
2220 key.offset = found_key.offset - 1;
2221 }
2222 ret = 0;
2223 if (failed && !retried) {
2224 failed = 0;
2225 retried = true;
2226 goto again;
2227 } else if (failed && retried) {
2228 WARN_ON(1);
2229 ret = -ENOSPC;
2230 }
2231 error:
2232 btrfs_free_path(path);
2233 return ret;
2234 }
2235
2236 static int insert_balance_item(struct btrfs_root *root,
2237 struct btrfs_balance_control *bctl)
2238 {
2239 struct btrfs_trans_handle *trans;
2240 struct btrfs_balance_item *item;
2241 struct btrfs_disk_balance_args disk_bargs;
2242 struct btrfs_path *path;
2243 struct extent_buffer *leaf;
2244 struct btrfs_key key;
2245 int ret, err;
2246
2247 path = btrfs_alloc_path();
2248 if (!path)
2249 return -ENOMEM;
2250
2251 trans = btrfs_start_transaction(root, 0);
2252 if (IS_ERR(trans)) {
2253 btrfs_free_path(path);
2254 return PTR_ERR(trans);
2255 }
2256
2257 key.objectid = BTRFS_BALANCE_OBJECTID;
2258 key.type = BTRFS_BALANCE_ITEM_KEY;
2259 key.offset = 0;
2260
2261 ret = btrfs_insert_empty_item(trans, root, path, &key,
2262 sizeof(*item));
2263 if (ret)
2264 goto out;
2265
2266 leaf = path->nodes[0];
2267 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2268
2269 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2270
2271 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2272 btrfs_set_balance_data(leaf, item, &disk_bargs);
2273 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2274 btrfs_set_balance_meta(leaf, item, &disk_bargs);
2275 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2276 btrfs_set_balance_sys(leaf, item, &disk_bargs);
2277
2278 btrfs_set_balance_flags(leaf, item, bctl->flags);
2279
2280 btrfs_mark_buffer_dirty(leaf);
2281 out:
2282 btrfs_free_path(path);
2283 err = btrfs_commit_transaction(trans, root);
2284 if (err && !ret)
2285 ret = err;
2286 return ret;
2287 }
2288
2289 static int del_balance_item(struct btrfs_root *root)
2290 {
2291 struct btrfs_trans_handle *trans;
2292 struct btrfs_path *path;
2293 struct btrfs_key key;
2294 int ret, err;
2295
2296 path = btrfs_alloc_path();
2297 if (!path)
2298 return -ENOMEM;
2299
2300 trans = btrfs_start_transaction(root, 0);
2301 if (IS_ERR(trans)) {
2302 btrfs_free_path(path);
2303 return PTR_ERR(trans);
2304 }
2305
2306 key.objectid = BTRFS_BALANCE_OBJECTID;
2307 key.type = BTRFS_BALANCE_ITEM_KEY;
2308 key.offset = 0;
2309
2310 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2311 if (ret < 0)
2312 goto out;
2313 if (ret > 0) {
2314 ret = -ENOENT;
2315 goto out;
2316 }
2317
2318 ret = btrfs_del_item(trans, root, path);
2319 out:
2320 btrfs_free_path(path);
2321 err = btrfs_commit_transaction(trans, root);
2322 if (err && !ret)
2323 ret = err;
2324 return ret;
2325 }
2326
2327 /*
2328 * This is a heuristic used to reduce the number of chunks balanced on
2329 * resume after balance was interrupted.
2330 */
2331 static void update_balance_args(struct btrfs_balance_control *bctl)
2332 {
2333 /*
2334 * Turn on soft mode for chunk types that were being converted.
2335 */
2336 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2337 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2338 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2339 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2340 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2341 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2342
2343 /*
2344 	 * Turn on the usage filter if it is not already in use.  The
2345 	 * idea is that chunks that we have already balanced should be
2346 	 * reasonably full.  Don't do it for chunks that are being
2347 	 * converted - that would keep us from relocating unconverted
2348 	 * (albeit full) chunks.
2349 */
2350 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2351 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2352 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2353 bctl->data.usage = 90;
2354 }
2355 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2356 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2357 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2358 bctl->sys.usage = 90;
2359 }
2360 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2361 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2362 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2363 bctl->meta.usage = 90;
2364 }
2365 }
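
/*
 * As an illustration of the heuristic above: a balance interrupted while
 * converting data chunks resumes roughly as if it had been restarted with
 * the soft flag on the converting type and a 90% usage filter on the
 * other types, so chunks that were already balanced (and are therefore
 * reasonably full) are skipped on resume.
 */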
2366
2367 /*
2368 * Should be called with both balance and volume mutexes held to
2369 * serialize other volume operations (add_dev/rm_dev/resize) with
2370  * the restriper.  The same goes for unset_balance_control.
2371 */
2372 static void set_balance_control(struct btrfs_balance_control *bctl)
2373 {
2374 struct btrfs_fs_info *fs_info = bctl->fs_info;
2375
2376 BUG_ON(fs_info->balance_ctl);
2377
2378 spin_lock(&fs_info->balance_lock);
2379 fs_info->balance_ctl = bctl;
2380 spin_unlock(&fs_info->balance_lock);
2381 }
2382
2383 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2384 {
2385 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2386
2387 BUG_ON(!fs_info->balance_ctl);
2388
2389 spin_lock(&fs_info->balance_lock);
2390 fs_info->balance_ctl = NULL;
2391 spin_unlock(&fs_info->balance_lock);
2392
2393 kfree(bctl);
2394 }
2395
2396 /*
2397 * Balance filters. Return 1 if chunk should be filtered out
2398 * (should not be balanced).
2399 */
2400 static int chunk_profiles_filter(u64 chunk_type,
2401 struct btrfs_balance_args *bargs)
2402 {
2403 chunk_type = chunk_to_extended(chunk_type) &
2404 BTRFS_EXTENDED_PROFILE_MASK;
2405
2406 if (bargs->profiles & chunk_type)
2407 return 0;
2408
2409 return 1;
2410 }
2411
2412 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2413 struct btrfs_balance_args *bargs)
2414 {
2415 struct btrfs_block_group_cache *cache;
2416 u64 chunk_used, user_thresh;
2417 int ret = 1;
2418
2419 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2420 chunk_used = btrfs_block_group_used(&cache->item);
2421
2422 user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
2423 if (chunk_used < user_thresh)
2424 ret = 0;
2425
2426 btrfs_put_block_group(cache);
2427 return ret;
2428 }
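
/*
 * A worked example of the usage filter above: for a 1 GiB chunk with
 * bargs->usage == 90, div_factor_fine() gives a threshold of
 * 1073741824 * 90 / 100 = 966367641 bytes.  A chunk with 900 MiB
 * (943718400 bytes) used is below the threshold and gets balanced
 * (return 0); one with 950 MiB used is filtered out (return 1).
 */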
2429
2430 static int chunk_devid_filter(struct extent_buffer *leaf,
2431 struct btrfs_chunk *chunk,
2432 struct btrfs_balance_args *bargs)
2433 {
2434 struct btrfs_stripe *stripe;
2435 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2436 int i;
2437
2438 for (i = 0; i < num_stripes; i++) {
2439 stripe = btrfs_stripe_nr(chunk, i);
2440 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2441 return 0;
2442 }
2443
2444 return 1;
2445 }
2446
2447 /* [pstart, pend) */
2448 static int chunk_drange_filter(struct extent_buffer *leaf,
2449 struct btrfs_chunk *chunk,
2450 u64 chunk_offset,
2451 struct btrfs_balance_args *bargs)
2452 {
2453 struct btrfs_stripe *stripe;
2454 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2455 u64 stripe_offset;
2456 u64 stripe_length;
2457 int factor;
2458 int i;
2459
2460 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2461 return 0;
2462
2463 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2464 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
2465 factor = 2;
2466 else
2467 factor = 1;
2468 factor = num_stripes / factor;
2469
2470 for (i = 0; i < num_stripes; i++) {
2471 stripe = btrfs_stripe_nr(chunk, i);
2472 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2473 continue;
2474
2475 stripe_offset = btrfs_stripe_offset(leaf, stripe);
2476 stripe_length = btrfs_chunk_length(leaf, chunk);
2477 do_div(stripe_length, factor);
2478
2479 if (stripe_offset < bargs->pend &&
2480 stripe_offset + stripe_length > bargs->pstart)
2481 return 0;
2482 }
2483
2484 return 1;
2485 }
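
/*
 * A worked example of the factor above: a RAID1 chunk has num_stripes == 2
 * and factor == 2 / 2 == 1, so each device holds the full chunk_length.
 * A three-device RAID0 chunk has factor == 3, so each stripe covers
 * chunk_length / 3 bytes of its device - the length used in the
 * [pstart, pend) overlap test.
 */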
2486
2487 /* [vstart, vend) */
2488 static int chunk_vrange_filter(struct extent_buffer *leaf,
2489 struct btrfs_chunk *chunk,
2490 u64 chunk_offset,
2491 struct btrfs_balance_args *bargs)
2492 {
2493 if (chunk_offset < bargs->vend &&
2494 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2495 /* at least part of the chunk is inside this vrange */
2496 return 0;
2497
2498 return 1;
2499 }
2500
2501 static int chunk_soft_convert_filter(u64 chunk_type,
2502 struct btrfs_balance_args *bargs)
2503 {
2504 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2505 return 0;
2506
2507 chunk_type = chunk_to_extended(chunk_type) &
2508 BTRFS_EXTENDED_PROFILE_MASK;
2509
2510 if (bargs->target == chunk_type)
2511 return 1;
2512
2513 return 0;
2514 }
2515
2516 static int should_balance_chunk(struct btrfs_root *root,
2517 struct extent_buffer *leaf,
2518 struct btrfs_chunk *chunk, u64 chunk_offset)
2519 {
2520 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2521 struct btrfs_balance_args *bargs = NULL;
2522 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2523
2524 /* type filter */
2525 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2526 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2527 return 0;
2528 }
2529
2530 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2531 bargs = &bctl->data;
2532 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2533 bargs = &bctl->sys;
2534 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2535 bargs = &bctl->meta;
2536
2537 /* profiles filter */
2538 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2539 chunk_profiles_filter(chunk_type, bargs)) {
2540 return 0;
2541 }
2542
2543 /* usage filter */
2544 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2545 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2546 return 0;
2547 }
2548
2549 /* devid filter */
2550 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2551 chunk_devid_filter(leaf, chunk, bargs)) {
2552 return 0;
2553 }
2554
2555 /* drange filter, makes sense only with devid filter */
2556 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2557 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2558 return 0;
2559 }
2560
2561 /* vrange filter */
2562 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2563 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2564 return 0;
2565 }
2566
2567 /* soft profile changing mode */
2568 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2569 chunk_soft_convert_filter(chunk_type, bargs)) {
2570 return 0;
2571 }
2572
2573 return 1;
2574 }
2575
2576 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2577 {
2578 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2579 struct btrfs_root *chunk_root = fs_info->chunk_root;
2580 struct btrfs_root *dev_root = fs_info->dev_root;
2581 struct list_head *devices;
2582 struct btrfs_device *device;
2583 u64 old_size;
2584 u64 size_to_free;
2585 struct btrfs_chunk *chunk;
2586 struct btrfs_path *path;
2587 struct btrfs_key key;
2588 struct btrfs_key found_key;
2589 struct btrfs_trans_handle *trans;
2590 struct extent_buffer *leaf;
2591 int slot;
2592 int ret;
2593 int enospc_errors = 0;
2594 bool counting = true;
2595
2596 	/* step one, make some room on all the devices */
2597 devices = &fs_info->fs_devices->devices;
2598 list_for_each_entry(device, devices, dev_list) {
2599 old_size = device->total_bytes;
2600 size_to_free = div_factor(old_size, 1);
2601 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2602 if (!device->writeable ||
2603 device->total_bytes - device->bytes_used > size_to_free)
2604 continue;
2605
2606 ret = btrfs_shrink_device(device, old_size - size_to_free);
2607 if (ret == -ENOSPC)
2608 break;
2609 BUG_ON(ret);
2610
2611 trans = btrfs_start_transaction(dev_root, 0);
2612 BUG_ON(IS_ERR(trans));
2613
2614 ret = btrfs_grow_device(trans, device, old_size);
2615 BUG_ON(ret);
2616
2617 btrfs_end_transaction(trans, dev_root);
2618 }
2619
2620 /* step two, relocate all the chunks */
2621 path = btrfs_alloc_path();
2622 if (!path) {
2623 ret = -ENOMEM;
2624 goto error;
2625 }
2626
2627 /* zero out stat counters */
2628 spin_lock(&fs_info->balance_lock);
2629 memset(&bctl->stat, 0, sizeof(bctl->stat));
2630 spin_unlock(&fs_info->balance_lock);
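
	/*
	 * The loop below runs twice: a first counting-only pass that fills
	 * in bctl->stat.expected, then a second pass that actually
	 * relocates the chunks which pass the filters.
	 */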
2631 again:
2632 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2633 key.offset = (u64)-1;
2634 key.type = BTRFS_CHUNK_ITEM_KEY;
2635
2636 while (1) {
2637 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2638 atomic_read(&fs_info->balance_cancel_req)) {
2639 ret = -ECANCELED;
2640 goto error;
2641 }
2642
2643 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2644 if (ret < 0)
2645 goto error;
2646
2647 /*
2648 * this shouldn't happen, it means the last relocate
2649 * failed
2650 */
2651 if (ret == 0)
2652 BUG(); /* FIXME break ? */
2653
2654 ret = btrfs_previous_item(chunk_root, path, 0,
2655 BTRFS_CHUNK_ITEM_KEY);
2656 if (ret) {
2657 ret = 0;
2658 break;
2659 }
2660
2661 leaf = path->nodes[0];
2662 slot = path->slots[0];
2663 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2664
2665 if (found_key.objectid != key.objectid)
2666 break;
2667
2668 /* chunk zero is special */
2669 if (found_key.offset == 0)
2670 break;
2671
2672 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2673
2674 if (!counting) {
2675 spin_lock(&fs_info->balance_lock);
2676 bctl->stat.considered++;
2677 spin_unlock(&fs_info->balance_lock);
2678 }
2679
2680 ret = should_balance_chunk(chunk_root, leaf, chunk,
2681 found_key.offset);
2682 btrfs_release_path(path);
2683 if (!ret)
2684 goto loop;
2685
2686 if (counting) {
2687 spin_lock(&fs_info->balance_lock);
2688 bctl->stat.expected++;
2689 spin_unlock(&fs_info->balance_lock);
2690 goto loop;
2691 }
2692
2693 ret = btrfs_relocate_chunk(chunk_root,
2694 chunk_root->root_key.objectid,
2695 found_key.objectid,
2696 found_key.offset);
2697 if (ret && ret != -ENOSPC)
2698 goto error;
2699 if (ret == -ENOSPC) {
2700 enospc_errors++;
2701 } else {
2702 spin_lock(&fs_info->balance_lock);
2703 bctl->stat.completed++;
2704 spin_unlock(&fs_info->balance_lock);
2705 }
2706 loop:
2707 key.offset = found_key.offset - 1;
2708 }
2709
2710 if (counting) {
2711 btrfs_release_path(path);
2712 counting = false;
2713 goto again;
2714 }
2715 error:
2716 btrfs_free_path(path);
2717 if (enospc_errors) {
2718 printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
2719 enospc_errors);
2720 if (!ret)
2721 ret = -ENOSPC;
2722 }
2723
2724 return ret;
2725 }
2726
2727 /**
2728 * alloc_profile_is_valid - see if a given profile is valid and reduced
2729 * @flags: profile to validate
2730 * @extended: if true @flags is treated as an extended profile
2731 */
2732 static int alloc_profile_is_valid(u64 flags, int extended)
2733 {
2734 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
2735 BTRFS_BLOCK_GROUP_PROFILE_MASK);
2736
2737 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
2738
2739 /* 1) check that all other bits are zeroed */
2740 if (flags & ~mask)
2741 return 0;
2742
2743 /* 2) see if profile is reduced */
2744 if (flags == 0)
2745 return !extended; /* "0" is valid for usual profiles */
2746
2747 /* true if exactly one bit set */
2748 return (flags & (flags - 1)) == 0;
2749 }
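
/*
 * The final test above is the usual power-of-two check: e.g.
 * 0b010000 & 0b001111 == 0, so a single profile bit is accepted as
 * reduced, while 0b011000 & 0b010111 == 0b010000 != 0 rejects any
 * combination of two or more profile bits.
 */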
2750
2751 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2752 {
2753 /* cancel requested || normal exit path */
2754 return atomic_read(&fs_info->balance_cancel_req) ||
2755 (atomic_read(&fs_info->balance_pause_req) == 0 &&
2756 atomic_read(&fs_info->balance_cancel_req) == 0);
2757 }
2758
2759 static void __cancel_balance(struct btrfs_fs_info *fs_info)
2760 {
2761 int ret;
2762
2763 unset_balance_control(fs_info);
2764 ret = del_balance_item(fs_info->tree_root);
2765 BUG_ON(ret);
2766 }
2767
2768 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
2769 struct btrfs_ioctl_balance_args *bargs);
2770
2771 /*
2772 * Should be called with both balance and volume mutexes held
2773 */
2774 int btrfs_balance(struct btrfs_balance_control *bctl,
2775 struct btrfs_ioctl_balance_args *bargs)
2776 {
2777 struct btrfs_fs_info *fs_info = bctl->fs_info;
2778 u64 allowed;
2779 int mixed = 0;
2780 int ret;
2781
2782 if (btrfs_fs_closing(fs_info) ||
2783 atomic_read(&fs_info->balance_pause_req) ||
2784 atomic_read(&fs_info->balance_cancel_req)) {
2785 ret = -EINVAL;
2786 goto out;
2787 }
2788
2789 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2790 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
2791 mixed = 1;
2792
2793 /*
2794 * In case of mixed groups both data and meta should be picked,
2795 * and identical options should be given for both of them.
2796 */
2797 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
2798 if (mixed && (bctl->flags & allowed)) {
2799 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
2800 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
2801 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
2802 printk(KERN_ERR "btrfs: with mixed groups data and "
2803 "metadata balance options must be the same\n");
2804 ret = -EINVAL;
2805 goto out;
2806 }
2807 }
2808
2809 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2810 if (fs_info->fs_devices->num_devices == 1)
2811 allowed |= BTRFS_BLOCK_GROUP_DUP;
2812 else if (fs_info->fs_devices->num_devices < 4)
2813 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
2814 else
2815 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2816 BTRFS_BLOCK_GROUP_RAID10);
2817
2818 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2819 (!alloc_profile_is_valid(bctl->data.target, 1) ||
2820 (bctl->data.target & ~allowed))) {
2821 printk(KERN_ERR "btrfs: unable to start balance with target "
2822 "data profile %llu\n",
2823 (unsigned long long)bctl->data.target);
2824 ret = -EINVAL;
2825 goto out;
2826 }
2827 if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2828 (!alloc_profile_is_valid(bctl->meta.target, 1) ||
2829 (bctl->meta.target & ~allowed))) {
2830 printk(KERN_ERR "btrfs: unable to start balance with target "
2831 "metadata profile %llu\n",
2832 (unsigned long long)bctl->meta.target);
2833 ret = -EINVAL;
2834 goto out;
2835 }
2836 if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2837 (!alloc_profile_is_valid(bctl->sys.target, 1) ||
2838 (bctl->sys.target & ~allowed))) {
2839 printk(KERN_ERR "btrfs: unable to start balance with target "
2840 "system profile %llu\n",
2841 (unsigned long long)bctl->sys.target);
2842 ret = -EINVAL;
2843 goto out;
2844 }
2845
2846 /* allow dup'ed data chunks only in mixed mode */
2847 if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2848 (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
2849 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
2850 ret = -EINVAL;
2851 goto out;
2852 }
2853
2854 	/* allow reducing meta or sys integrity only if force is set */
2855 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2856 BTRFS_BLOCK_GROUP_RAID10;
2857 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2858 (fs_info->avail_system_alloc_bits & allowed) &&
2859 !(bctl->sys.target & allowed)) ||
2860 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2861 (fs_info->avail_metadata_alloc_bits & allowed) &&
2862 !(bctl->meta.target & allowed))) {
2863 if (bctl->flags & BTRFS_BALANCE_FORCE) {
2864 printk(KERN_INFO "btrfs: force reducing metadata "
2865 "integrity\n");
2866 } else {
2867 printk(KERN_ERR "btrfs: balance will reduce metadata "
2868 "integrity, use force if you want this\n");
2869 ret = -EINVAL;
2870 goto out;
2871 }
2872 }
2873
2874 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
2875 int num_tolerated_disk_barrier_failures;
2876 u64 target = bctl->sys.target;
2877
2878 num_tolerated_disk_barrier_failures =
2879 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2880 if (num_tolerated_disk_barrier_failures > 0 &&
2881 (target &
2882 (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
2883 BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
2884 num_tolerated_disk_barrier_failures = 0;
2885 else if (num_tolerated_disk_barrier_failures > 1 &&
2886 (target &
2887 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
2888 num_tolerated_disk_barrier_failures = 1;
2889
2890 fs_info->num_tolerated_disk_barrier_failures =
2891 num_tolerated_disk_barrier_failures;
2892 }
2893
2894 ret = insert_balance_item(fs_info->tree_root, bctl);
2895 if (ret && ret != -EEXIST)
2896 goto out;
2897
2898 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
2899 BUG_ON(ret == -EEXIST);
2900 set_balance_control(bctl);
2901 } else {
2902 BUG_ON(ret != -EEXIST);
2903 spin_lock(&fs_info->balance_lock);
2904 update_balance_args(bctl);
2905 spin_unlock(&fs_info->balance_lock);
2906 }
2907
2908 atomic_inc(&fs_info->balance_running);
2909 mutex_unlock(&fs_info->balance_mutex);
2910
2911 ret = __btrfs_balance(fs_info);
2912
2913 mutex_lock(&fs_info->balance_mutex);
2914 atomic_dec(&fs_info->balance_running);
2915
2916 if (bargs) {
2917 memset(bargs, 0, sizeof(*bargs));
2918 update_ioctl_balance_args(fs_info, 0, bargs);
2919 }
2920
2921 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
2922 balance_need_close(fs_info)) {
2923 __cancel_balance(fs_info);
2924 }
2925
2926 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
2927 fs_info->num_tolerated_disk_barrier_failures =
2928 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2929 }
2930
2931 wake_up(&fs_info->balance_wait_q);
2932
2933 return ret;
2934 out:
2935 if (bctl->flags & BTRFS_BALANCE_RESUME)
2936 __cancel_balance(fs_info);
2937 else
2938 kfree(bctl);
2939 return ret;
2940 }
2941
2942 static int balance_kthread(void *data)
2943 {
2944 struct btrfs_fs_info *fs_info = data;
2945 int ret = 0;
2946
2947 mutex_lock(&fs_info->volume_mutex);
2948 mutex_lock(&fs_info->balance_mutex);
2949
2950 if (fs_info->balance_ctl) {
2951 printk(KERN_INFO "btrfs: continuing balance\n");
2952 ret = btrfs_balance(fs_info->balance_ctl, NULL);
2953 }
2954
2955 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
2956 mutex_unlock(&fs_info->balance_mutex);
2957 mutex_unlock(&fs_info->volume_mutex);
2958
2959 return ret;
2960 }
2961
2962 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
2963 {
2964 struct task_struct *tsk;
2965
2966 spin_lock(&fs_info->balance_lock);
2967 if (!fs_info->balance_ctl) {
2968 spin_unlock(&fs_info->balance_lock);
2969 return 0;
2970 }
2971 spin_unlock(&fs_info->balance_lock);
2972
2973 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
2974 printk(KERN_INFO "btrfs: force skipping balance\n");
2975 return 0;
2976 }
2977
2978 WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
2979 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
2980 if (IS_ERR(tsk))
2981 return PTR_ERR(tsk);
2982
2983 return 0;
2984 }
2985
2986 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
2987 {
2988 struct btrfs_balance_control *bctl;
2989 struct btrfs_balance_item *item;
2990 struct btrfs_disk_balance_args disk_bargs;
2991 struct btrfs_path *path;
2992 struct extent_buffer *leaf;
2993 struct btrfs_key key;
2994 int ret;
2995
2996 path = btrfs_alloc_path();
2997 if (!path)
2998 return -ENOMEM;
2999
3000 key.objectid = BTRFS_BALANCE_OBJECTID;
3001 key.type = BTRFS_BALANCE_ITEM_KEY;
3002 key.offset = 0;
3003
3004 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3005 if (ret < 0)
3006 goto out;
3007 if (ret > 0) { /* ret = -ENOENT; */
3008 ret = 0;
3009 goto out;
3010 }
3011
3012 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3013 if (!bctl) {
3014 ret = -ENOMEM;
3015 goto out;
3016 }
3017
3018 leaf = path->nodes[0];
3019 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3020
3021 bctl->fs_info = fs_info;
3022 bctl->flags = btrfs_balance_flags(leaf, item);
3023 bctl->flags |= BTRFS_BALANCE_RESUME;
3024
3025 btrfs_balance_data(leaf, item, &disk_bargs);
3026 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3027 btrfs_balance_meta(leaf, item, &disk_bargs);
3028 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3029 btrfs_balance_sys(leaf, item, &disk_bargs);
3030 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3031
3032 mutex_lock(&fs_info->volume_mutex);
3033 mutex_lock(&fs_info->balance_mutex);
3034
3035 set_balance_control(bctl);
3036
3037 mutex_unlock(&fs_info->balance_mutex);
3038 mutex_unlock(&fs_info->volume_mutex);
3039 out:
3040 btrfs_free_path(path);
3041 return ret;
3042 }
3043
3044 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3045 {
3046 int ret = 0;
3047
3048 mutex_lock(&fs_info->balance_mutex);
3049 if (!fs_info->balance_ctl) {
3050 mutex_unlock(&fs_info->balance_mutex);
3051 return -ENOTCONN;
3052 }
3053
3054 if (atomic_read(&fs_info->balance_running)) {
3055 atomic_inc(&fs_info->balance_pause_req);
3056 mutex_unlock(&fs_info->balance_mutex);
3057
3058 wait_event(fs_info->balance_wait_q,
3059 atomic_read(&fs_info->balance_running) == 0);
3060
3061 mutex_lock(&fs_info->balance_mutex);
3062 /* we are good with balance_ctl ripped off from under us */
3063 BUG_ON(atomic_read(&fs_info->balance_running));
3064 atomic_dec(&fs_info->balance_pause_req);
3065 } else {
3066 ret = -ENOTCONN;
3067 }
3068
3069 mutex_unlock(&fs_info->balance_mutex);
3070 return ret;
3071 }
3072
3073 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3074 {
3075 mutex_lock(&fs_info->balance_mutex);
3076 if (!fs_info->balance_ctl) {
3077 mutex_unlock(&fs_info->balance_mutex);
3078 return -ENOTCONN;
3079 }
3080
3081 atomic_inc(&fs_info->balance_cancel_req);
3082 /*
3083 	 * if we are running, just wait and return; the balance item is
3084 	 * deleted in btrfs_balance in this case
3085 */
3086 if (atomic_read(&fs_info->balance_running)) {
3087 mutex_unlock(&fs_info->balance_mutex);
3088 wait_event(fs_info->balance_wait_q,
3089 atomic_read(&fs_info->balance_running) == 0);
3090 mutex_lock(&fs_info->balance_mutex);
3091 } else {
3092 /* __cancel_balance needs volume_mutex */
3093 mutex_unlock(&fs_info->balance_mutex);
3094 mutex_lock(&fs_info->volume_mutex);
3095 mutex_lock(&fs_info->balance_mutex);
3096
3097 if (fs_info->balance_ctl)
3098 __cancel_balance(fs_info);
3099
3100 mutex_unlock(&fs_info->volume_mutex);
3101 }
3102
3103 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3104 atomic_dec(&fs_info->balance_cancel_req);
3105 mutex_unlock(&fs_info->balance_mutex);
3106 return 0;
3107 }
3108
3109 /*
3110 * shrinking a device means finding all of the device extents past
3111 * the new size, and then following the back refs to the chunks.
3112  * The chunk relocation code actually frees the device extents.
3113 */
3114 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3115 {
3116 struct btrfs_trans_handle *trans;
3117 struct btrfs_root *root = device->dev_root;
3118 struct btrfs_dev_extent *dev_extent = NULL;
3119 struct btrfs_path *path;
3120 u64 length;
3121 u64 chunk_tree;
3122 u64 chunk_objectid;
3123 u64 chunk_offset;
3124 int ret;
3125 int slot;
3126 int failed = 0;
3127 bool retried = false;
3128 struct extent_buffer *l;
3129 struct btrfs_key key;
3130 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3131 u64 old_total = btrfs_super_total_bytes(super_copy);
3132 u64 old_size = device->total_bytes;
3133 u64 diff = device->total_bytes - new_size;
3134
3135 path = btrfs_alloc_path();
3136 if (!path)
3137 return -ENOMEM;
3138
3139 path->reada = 2;
3140
3141 lock_chunks(root);
3142
3143 device->total_bytes = new_size;
3144 if (device->writeable) {
3145 device->fs_devices->total_rw_bytes -= diff;
3146 spin_lock(&root->fs_info->free_chunk_lock);
3147 root->fs_info->free_chunk_space -= diff;
3148 spin_unlock(&root->fs_info->free_chunk_lock);
3149 }
3150 unlock_chunks(root);
3151
3152 again:
3153 key.objectid = device->devid;
3154 key.offset = (u64)-1;
3155 key.type = BTRFS_DEV_EXTENT_KEY;
3156
3157 do {
3158 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3159 if (ret < 0)
3160 goto done;
3161
3162 ret = btrfs_previous_item(root, path, 0, key.type);
3163 if (ret < 0)
3164 goto done;
3165 if (ret) {
3166 ret = 0;
3167 btrfs_release_path(path);
3168 break;
3169 }
3170
3171 l = path->nodes[0];
3172 slot = path->slots[0];
3173 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3174
3175 if (key.objectid != device->devid) {
3176 btrfs_release_path(path);
3177 break;
3178 }
3179
3180 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3181 length = btrfs_dev_extent_length(l, dev_extent);
3182
3183 if (key.offset + length <= new_size) {
3184 btrfs_release_path(path);
3185 break;
3186 }
3187
3188 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3189 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3190 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3191 btrfs_release_path(path);
3192
3193 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3194 chunk_offset);
3195 if (ret && ret != -ENOSPC)
3196 goto done;
3197 if (ret == -ENOSPC)
3198 failed++;
3199 } while (key.offset-- > 0);
3200
3201 if (failed && !retried) {
3202 failed = 0;
3203 retried = true;
3204 goto again;
3205 } else if (failed && retried) {
3206 ret = -ENOSPC;
3207 lock_chunks(root);
3208
3209 device->total_bytes = old_size;
3210 if (device->writeable)
3211 device->fs_devices->total_rw_bytes += diff;
3212 spin_lock(&root->fs_info->free_chunk_lock);
3213 root->fs_info->free_chunk_space += diff;
3214 spin_unlock(&root->fs_info->free_chunk_lock);
3215 unlock_chunks(root);
3216 goto done;
3217 }
3218
3219 /* Shrinking succeeded, else we would be at "done". */
3220 trans = btrfs_start_transaction(root, 0);
3221 if (IS_ERR(trans)) {
3222 ret = PTR_ERR(trans);
3223 goto done;
3224 }
3225
3226 lock_chunks(root);
3227
3228 device->disk_total_bytes = new_size;
3229 /* Now btrfs_update_device() will change the on-disk size. */
3230 ret = btrfs_update_device(trans, device);
3231 if (ret) {
3232 unlock_chunks(root);
3233 btrfs_end_transaction(trans, root);
3234 goto done;
3235 }
3236 WARN_ON(diff > old_total);
3237 btrfs_set_super_total_bytes(super_copy, old_total - diff);
3238 unlock_chunks(root);
3239 btrfs_end_transaction(trans, root);
3240 done:
3241 btrfs_free_path(path);
3242 return ret;
3243 }
3244
3245 static int btrfs_add_system_chunk(struct btrfs_root *root,
3246 struct btrfs_key *key,
3247 struct btrfs_chunk *chunk, int item_size)
3248 {
3249 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3250 struct btrfs_disk_key disk_key;
3251 u32 array_size;
3252 u8 *ptr;
3253
3254 array_size = btrfs_super_sys_array_size(super_copy);
3255 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3256 return -EFBIG;
3257
3258 ptr = super_copy->sys_chunk_array + array_size;
3259 btrfs_cpu_key_to_disk(&disk_key, key);
3260 memcpy(ptr, &disk_key, sizeof(disk_key));
3261 ptr += sizeof(disk_key);
3262 memcpy(ptr, chunk, item_size);
3263 item_size += sizeof(disk_key);
3264 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3265 return 0;
3266 }
3267
3268 /*
3269 * sort the devices in descending order by max_avail, total_avail
3270 */
3271 static int btrfs_cmp_device_info(const void *a, const void *b)
3272 {
3273 const struct btrfs_device_info *di_a = a;
3274 const struct btrfs_device_info *di_b = b;
3275
3276 if (di_a->max_avail > di_b->max_avail)
3277 return -1;
3278 if (di_a->max_avail < di_b->max_avail)
3279 return 1;
3280 if (di_a->total_avail > di_b->total_avail)
3281 return -1;
3282 if (di_a->total_avail < di_b->total_avail)
3283 return 1;
3284 return 0;
3285 }
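
/*
 * Example ordering under this comparator: devices with (max_avail,
 * total_avail) of (4G, 6G), (2G, 8G) and (2G, 5G) sort to exactly that
 * order - max_avail decides first, total_avail only breaks ties.
 */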
3286
3287 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3288 struct btrfs_root *extent_root,
3289 struct map_lookup **map_ret,
3290 u64 *num_bytes_out, u64 *stripe_size_out,
3291 u64 start, u64 type)
3292 {
3293 struct btrfs_fs_info *info = extent_root->fs_info;
3294 struct btrfs_fs_devices *fs_devices = info->fs_devices;
3295 struct list_head *cur;
3296 struct map_lookup *map = NULL;
3297 struct extent_map_tree *em_tree;
3298 struct extent_map *em;
3299 struct btrfs_device_info *devices_info = NULL;
3300 u64 total_avail;
3301 int num_stripes; /* total number of stripes to allocate */
3302 int sub_stripes; /* sub_stripes info for map */
3303 int dev_stripes; /* stripes per dev */
3304 int devs_max; /* max devs to use */
3305 int devs_min; /* min devs needed */
3306 int devs_increment; /* ndevs has to be a multiple of this */
3307 	int ncopies;		/* how many copies the data has */
3308 int ret;
3309 u64 max_stripe_size;
3310 u64 max_chunk_size;
3311 u64 stripe_size;
3312 u64 num_bytes;
3313 int ndevs;
3314 int i;
3315 int j;
3316
3317 BUG_ON(!alloc_profile_is_valid(type, 0));
3318
3319 if (list_empty(&fs_devices->alloc_list))
3320 return -ENOSPC;
3321
3322 sub_stripes = 1;
3323 dev_stripes = 1;
3324 devs_increment = 1;
3325 ncopies = 1;
3326 devs_max = 0; /* 0 == as many as possible */
3327 devs_min = 1;
3328
3329 /*
3330 * define the properties of each RAID type.
3331 * FIXME: move this to a global table and use it in all RAID
3332 * calculation code
3333 */
3334 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
3335 dev_stripes = 2;
3336 ncopies = 2;
3337 devs_max = 1;
3338 } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
3339 devs_min = 2;
3340 } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
3341 devs_increment = 2;
3342 ncopies = 2;
3343 devs_max = 2;
3344 devs_min = 2;
3345 } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
3346 sub_stripes = 2;
3347 devs_increment = 2;
3348 ncopies = 2;
3349 devs_min = 4;
3350 } else {
3351 devs_max = 1;
3352 }
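
	/*
	 * A sketch of the global table the FIXME above asks for; the
	 * struct layout is hypothetical, the values mirror the
	 * assignments above (illustrative only, hence #if 0):
	 */
#if 0
	static const struct {
		int sub_stripes, dev_stripes, devs_max, devs_min;
		int devs_increment, ncopies;
	} raid_attr[] = {
		{ /* DUP    */ 1, 2, 1, 1, 1, 2 },
		{ /* RAID0  */ 1, 1, 0, 2, 1, 1 },
		{ /* RAID1  */ 1, 1, 2, 2, 2, 2 },
		{ /* RAID10 */ 2, 1, 0, 4, 2, 2 },
		{ /* single */ 1, 1, 1, 1, 1, 1 },
	};
#endif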
3353
3354 if (type & BTRFS_BLOCK_GROUP_DATA) {
3355 max_stripe_size = 1024 * 1024 * 1024;
3356 max_chunk_size = 10 * max_stripe_size;
3357 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3358 /* for larger filesystems, use larger metadata chunks */
3359 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3360 max_stripe_size = 1024 * 1024 * 1024;
3361 else
3362 max_stripe_size = 256 * 1024 * 1024;
3363 max_chunk_size = max_stripe_size;
3364 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3365 max_stripe_size = 32 * 1024 * 1024;
3366 max_chunk_size = 2 * max_stripe_size;
3367 } else {
3368 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3369 type);
3370 BUG_ON(1);
3371 }
3372
3373 /* we don't want a chunk larger than 10% of writeable space */
3374 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3375 max_chunk_size);
3376
3377 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3378 GFP_NOFS);
3379 if (!devices_info)
3380 return -ENOMEM;
3381
3382 cur = fs_devices->alloc_list.next;
3383
3384 /*
3385 * in the first pass through the devices list, we gather information
3386 * about the available holes on each device.
3387 */
3388 ndevs = 0;
3389 while (cur != &fs_devices->alloc_list) {
3390 struct btrfs_device *device;
3391 u64 max_avail;
3392 u64 dev_offset;
3393
3394 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3395
3396 cur = cur->next;
3397
3398 if (!device->writeable) {
3399 WARN(1, KERN_ERR
3400 "btrfs: read-only device in alloc_list\n");
3401 continue;
3402 }
3403
3404 if (!device->in_fs_metadata)
3405 continue;
3406
3407 if (device->total_bytes > device->bytes_used)
3408 total_avail = device->total_bytes - device->bytes_used;
3409 else
3410 total_avail = 0;
3411
3412 /* If there is no space on this device, skip it. */
3413 if (total_avail == 0)
3414 continue;
3415
3416 ret = find_free_dev_extent(device,
3417 max_stripe_size * dev_stripes,
3418 &dev_offset, &max_avail);
3419 if (ret && ret != -ENOSPC)
3420 goto error;
3421
3422 if (ret == 0)
3423 max_avail = max_stripe_size * dev_stripes;
3424
3425 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3426 continue;
3427
3428 devices_info[ndevs].dev_offset = dev_offset;
3429 devices_info[ndevs].max_avail = max_avail;
3430 devices_info[ndevs].total_avail = total_avail;
3431 devices_info[ndevs].dev = device;
3432 ++ndevs;
3433 }
3434
3435 /*
3436 * now sort the devices by hole size / available space
3437 */
3438 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3439 btrfs_cmp_device_info, NULL);
3440
3441 /* round down to number of usable stripes */
3442 ndevs -= ndevs % devs_increment;
3443
3444 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3445 ret = -ENOSPC;
3446 goto error;
3447 }
3448
3449 if (devs_max && ndevs > devs_max)
3450 ndevs = devs_max;
3451 /*
3452 * the primary goal is to maximize the number of stripes, so use as many
3453 * devices as possible, even if the stripes are not maximum sized.
3454 */
3455 stripe_size = devices_info[ndevs-1].max_avail;
3456 num_stripes = ndevs * dev_stripes;
3457
3458 if (stripe_size * ndevs > max_chunk_size * ncopies) {
3459 stripe_size = max_chunk_size * ncopies;
3460 do_div(stripe_size, ndevs);
3461 }
3462
3463 do_div(stripe_size, dev_stripes);
3464
3465 /* align to BTRFS_STRIPE_LEN */
3466 do_div(stripe_size, BTRFS_STRIPE_LEN);
3467 stripe_size *= BTRFS_STRIPE_LEN;
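
	/*
	 * A worked example, assuming a data chunk over three devices with
	 * at least 1 GiB of free space each: RAID0 has dev_stripes == 1
	 * and ncopies == 1, stripe_size starts at the 1 GiB
	 * max_stripe_size, the 10 GiB max_chunk_size cap does not apply,
	 * and the result is a 3 GiB chunk (num_bytes below works out to
	 * stripe_size * num_stripes / ncopies).
	 */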
3468
3469 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3470 if (!map) {
3471 ret = -ENOMEM;
3472 goto error;
3473 }
3474 map->num_stripes = num_stripes;
3475
3476 for (i = 0; i < ndevs; ++i) {
3477 for (j = 0; j < dev_stripes; ++j) {
3478 int s = i * dev_stripes + j;
3479 map->stripes[s].dev = devices_info[i].dev;
3480 map->stripes[s].physical = devices_info[i].dev_offset +
3481 j * stripe_size;
3482 }
3483 }
3484 map->sector_size = extent_root->sectorsize;
3485 map->stripe_len = BTRFS_STRIPE_LEN;
3486 map->io_align = BTRFS_STRIPE_LEN;
3487 map->io_width = BTRFS_STRIPE_LEN;
3488 map->type = type;
3489 map->sub_stripes = sub_stripes;
3490
3491 *map_ret = map;
3492 num_bytes = stripe_size * (num_stripes / ncopies);
3493
3494 *stripe_size_out = stripe_size;
3495 *num_bytes_out = num_bytes;
3496
3497 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3498
3499 em = alloc_extent_map();
3500 if (!em) {
3501 ret = -ENOMEM;
3502 goto error;
3503 }
3504 em->bdev = (struct block_device *)map;
3505 em->start = start;
3506 em->len = num_bytes;
3507 em->block_start = 0;
3508 em->block_len = em->len;
3509
3510 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3511 write_lock(&em_tree->lock);
3512 ret = add_extent_mapping(em_tree, em);
3513 write_unlock(&em_tree->lock);
3514 free_extent_map(em);
3515 if (ret)
3516 goto error;
3517
3518 ret = btrfs_make_block_group(trans, extent_root, 0, type,
3519 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3520 start, num_bytes);
3521 if (ret)
3522 goto error;
3523
3524 for (i = 0; i < map->num_stripes; ++i) {
3525 struct btrfs_device *device;
3526 u64 dev_offset;
3527
3528 device = map->stripes[i].dev;
3529 dev_offset = map->stripes[i].physical;
3530
3531 ret = btrfs_alloc_dev_extent(trans, device,
3532 info->chunk_root->root_key.objectid,
3533 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3534 start, dev_offset, stripe_size);
3535 if (ret) {
3536 btrfs_abort_transaction(trans, extent_root, ret);
3537 goto error;
3538 }
3539 }
3540
3541 kfree(devices_info);
3542 return 0;
3543
3544 error:
3545 kfree(map);
3546 kfree(devices_info);
3547 return ret;
3548 }
3549
3550 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3551 struct btrfs_root *extent_root,
3552 struct map_lookup *map, u64 chunk_offset,
3553 u64 chunk_size, u64 stripe_size)
3554 {
3555 u64 dev_offset;
3556 struct btrfs_key key;
3557 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3558 struct btrfs_device *device;
3559 struct btrfs_chunk *chunk;
3560 struct btrfs_stripe *stripe;
3561 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3562 int index = 0;
3563 int ret;
3564
3565 chunk = kzalloc(item_size, GFP_NOFS);
3566 if (!chunk)
3567 return -ENOMEM;
3568
3569 index = 0;
3570 while (index < map->num_stripes) {
3571 device = map->stripes[index].dev;
3572 device->bytes_used += stripe_size;
3573 ret = btrfs_update_device(trans, device);
3574 if (ret)
3575 goto out_free;
3576 index++;
3577 }
3578
3579 spin_lock(&extent_root->fs_info->free_chunk_lock);
3580 extent_root->fs_info->free_chunk_space -= (stripe_size *
3581 map->num_stripes);
3582 spin_unlock(&extent_root->fs_info->free_chunk_lock);
3583
3584 index = 0;
3585 stripe = &chunk->stripe;
3586 while (index < map->num_stripes) {
3587 device = map->stripes[index].dev;
3588 dev_offset = map->stripes[index].physical;
3589
3590 btrfs_set_stack_stripe_devid(stripe, device->devid);
3591 btrfs_set_stack_stripe_offset(stripe, dev_offset);
3592 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3593 stripe++;
3594 index++;
3595 }
3596
3597 btrfs_set_stack_chunk_length(chunk, chunk_size);
3598 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3599 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3600 btrfs_set_stack_chunk_type(chunk, map->type);
3601 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3602 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3603 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3604 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3605 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3606
3607 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3608 key.type = BTRFS_CHUNK_ITEM_KEY;
3609 key.offset = chunk_offset;
3610
3611 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3612
3613 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3614 /*
3615 * TODO: Cleanup of inserted chunk root in case of
3616 * failure.
3617 */
3618 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3619 item_size);
3620 }
3621
3622 out_free:
3623 kfree(chunk);
3624 return ret;
3625 }
3626
3627 /*
3628  * Chunk allocation falls into two parts. The first part does the work
3629  * that makes the newly allocated chunk usable, but does not do any
3630  * operation that modifies the chunk tree. The second part does the
3631  * work that requires modifying the chunk tree. This division is
3632  * important for the bootstrap process of adding storage to a seed btrfs.
3633 */
3634 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3635 struct btrfs_root *extent_root, u64 type)
3636 {
3637 u64 chunk_offset;
3638 u64 chunk_size;
3639 u64 stripe_size;
3640 struct map_lookup *map;
3641 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3642 int ret;
3643
3644 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3645 &chunk_offset);
3646 if (ret)
3647 return ret;
3648
3649 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3650 &stripe_size, chunk_offset, type);
3651 if (ret)
3652 return ret;
3653
3654 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3655 chunk_size, stripe_size);
3656 if (ret)
3657 return ret;
3658 return 0;
3659 }
3660
3661 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3662 struct btrfs_root *root,
3663 struct btrfs_device *device)
3664 {
3665 u64 chunk_offset;
3666 u64 sys_chunk_offset;
3667 u64 chunk_size;
3668 u64 sys_chunk_size;
3669 u64 stripe_size;
3670 u64 sys_stripe_size;
3671 u64 alloc_profile;
3672 struct map_lookup *map;
3673 struct map_lookup *sys_map;
3674 struct btrfs_fs_info *fs_info = root->fs_info;
3675 struct btrfs_root *extent_root = fs_info->extent_root;
3676 int ret;
3677
3678 ret = find_next_chunk(fs_info->chunk_root,
3679 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3680 if (ret)
3681 return ret;
3682
3683 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3684 fs_info->avail_metadata_alloc_bits;
3685 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3686
3687 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3688 &stripe_size, chunk_offset, alloc_profile);
3689 if (ret)
3690 return ret;
3691
3692 sys_chunk_offset = chunk_offset + chunk_size;
3693
3694 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3695 fs_info->avail_system_alloc_bits;
3696 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3697
3698 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3699 &sys_chunk_size, &sys_stripe_size,
3700 sys_chunk_offset, alloc_profile);
3701 if (ret) {
3702 btrfs_abort_transaction(trans, root, ret);
3703 goto out;
3704 }
3705
3706 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3707 if (ret) {
3708 btrfs_abort_transaction(trans, root, ret);
3709 goto out;
3710 }
3711
3712 /*
3713 	 * Modifying the chunk tree requires allocating new blocks from
3714 	 * both the system block group and the metadata block group, so we
3715 	 * can only do operations that modify the chunk tree after both
3716 	 * block groups have been created.
3717 */
3718 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3719 chunk_size, stripe_size);
3720 if (ret) {
3721 btrfs_abort_transaction(trans, root, ret);
3722 goto out;
3723 }
3724
3725 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3726 sys_chunk_offset, sys_chunk_size,
3727 sys_stripe_size);
3728 if (ret)
3729 btrfs_abort_transaction(trans, root, ret);
3730
3731 out:
3732
3733 return ret;
3734 }
3735
3736 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3737 {
3738 struct extent_map *em;
3739 struct map_lookup *map;
3740 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3741 int readonly = 0;
3742 int i;
3743
3744 read_lock(&map_tree->map_tree.lock);
3745 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3746 read_unlock(&map_tree->map_tree.lock);
3747 if (!em)
3748 return 1;
3749
3750 if (btrfs_test_opt(root, DEGRADED)) {
3751 free_extent_map(em);
3752 return 0;
3753 }
3754
3755 map = (struct map_lookup *)em->bdev;
3756 for (i = 0; i < map->num_stripes; i++) {
3757 if (!map->stripes[i].dev->writeable) {
3758 readonly = 1;
3759 break;
3760 }
3761 }
3762 free_extent_map(em);
3763 return readonly;
3764 }
3765
3766 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3767 {
3768 extent_map_tree_init(&tree->map_tree);
3769 }
3770
3771 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3772 {
3773 struct extent_map *em;
3774
3775 while (1) {
3776 write_lock(&tree->map_tree.lock);
3777 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3778 if (em)
3779 remove_extent_mapping(&tree->map_tree, em);
3780 write_unlock(&tree->map_tree.lock);
3781 if (!em)
3782 break;
3783 kfree(em->bdev);
3784 /* once for us */
3785 free_extent_map(em);
3786 /* once for the tree */
3787 free_extent_map(em);
3788 }
3789 }
3790
3791 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
3792 {
3793 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
3794 struct extent_map *em;
3795 struct map_lookup *map;
3796 struct extent_map_tree *em_tree = &map_tree->map_tree;
3797 int ret;
3798
3799 read_lock(&em_tree->lock);
3800 em = lookup_extent_mapping(em_tree, logical, len);
3801 read_unlock(&em_tree->lock);
3802 BUG_ON(!em);
3803
3804 BUG_ON(em->start > logical || em->start + em->len < logical);
3805 map = (struct map_lookup *)em->bdev;
3806 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
3807 ret = map->num_stripes;
3808 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3809 ret = map->sub_stripes;
3810 else
3811 ret = 1;
3812 free_extent_map(em);
3813 return ret;
3814 }
3815
3816 static int find_live_mirror(struct map_lookup *map, int first, int num,
3817 int optimal)
3818 {
3819 int i;
3820 if (map->stripes[optimal].dev->bdev)
3821 return optimal;
3822 for (i = first; i < first + num; i++) {
3823 if (map->stripes[i].dev->bdev)
3824 return i;
3825 }
3826 /* we couldn't find one that doesn't fail. Just return something
3827 * and the io error handling code will clean up eventually
3828 */
3829 return optimal;
3830 }
3831
3832 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
3833 u64 logical, u64 *length,
3834 struct btrfs_bio **bbio_ret,
3835 int mirror_num)
3836 {
3837 struct extent_map *em;
3838 struct map_lookup *map;
3839 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
3840 struct extent_map_tree *em_tree = &map_tree->map_tree;
3841 u64 offset;
3842 u64 stripe_offset;
3843 u64 stripe_end_offset;
3844 u64 stripe_nr;
3845 u64 stripe_nr_orig;
3846 u64 stripe_nr_end;
3847 int stripe_index;
3848 int i;
3849 int ret = 0;
3850 int num_stripes;
3851 int max_errors = 0;
3852 struct btrfs_bio *bbio = NULL;
3853
3854 read_lock(&em_tree->lock);
3855 em = lookup_extent_mapping(em_tree, logical, *length);
3856 read_unlock(&em_tree->lock);
3857
3858 if (!em) {
3859 printk(KERN_CRIT "btrfs: unable to find logical %llu len %llu\n",
3860 (unsigned long long)logical,
3861 (unsigned long long)*length);
3862 BUG();
3863 }
3864
3865 BUG_ON(em->start > logical || em->start + em->len < logical);
3866 map = (struct map_lookup *)em->bdev;
3867 offset = logical - em->start;
3868
3869 if (mirror_num > map->num_stripes)
3870 mirror_num = 0;
3871
3872 stripe_nr = offset;
3873 /*
3874 * stripe_nr counts the total number of stripes we have to stride
3875 * to get to this block
3876 */
3877 do_div(stripe_nr, map->stripe_len);
3878
3879 stripe_offset = stripe_nr * map->stripe_len;
3880 BUG_ON(offset < stripe_offset);
3881
3882 	/* stripe_offset is the offset of this block in its stripe */
3883 stripe_offset = offset - stripe_offset;
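
	/*
	 * A worked example with the default 64K stripe_len: for an offset
	 * of 300K into the chunk, do_div() leaves stripe_nr == 4 (four
	 * whole stripes, 256K) and stripe_offset == 300K - 256K == 44K
	 * into the fifth stripe.
	 */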
3884
3885 if (rw & REQ_DISCARD)
3886 *length = min_t(u64, em->len - offset, *length);
3887 else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
3888 /* we limit the length of each bio to what fits in a stripe */
3889 *length = min_t(u64, em->len - offset,
3890 map->stripe_len - stripe_offset);
3891 } else {
3892 *length = em->len - offset;
3893 }
3894
3895 if (!bbio_ret)
3896 goto out;
3897
3898 num_stripes = 1;
3899 stripe_index = 0;
3900 stripe_nr_orig = stripe_nr;
3901 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3902 (~(map->stripe_len - 1));
3903 do_div(stripe_nr_end, map->stripe_len);
3904 stripe_end_offset = stripe_nr_end * map->stripe_len -
3905 (offset + *length);
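
	/*
	 * Example of the rounding above, again with a 64K stripe_len: a
	 * discard of [300K, 400K) rounds the end up to 448K, so
	 * stripe_nr_end == 7 and stripe_end_offset == 7 * 64K - 400K
	 * == 48K of slack in the last stripe.
	 */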
3906 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3907 if (rw & REQ_DISCARD)
3908 num_stripes = min_t(u64, map->num_stripes,
3909 stripe_nr_end - stripe_nr_orig);
3910 stripe_index = do_div(stripe_nr, map->num_stripes);
3911 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3912 if (rw & (REQ_WRITE | REQ_DISCARD))
3913 num_stripes = map->num_stripes;
3914 else if (mirror_num)
3915 stripe_index = mirror_num - 1;
3916 else {
3917 stripe_index = find_live_mirror(map, 0,
3918 map->num_stripes,
3919 current->pid % map->num_stripes);
3920 mirror_num = stripe_index + 1;
3921 }
3922
3923 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3924 if (rw & (REQ_WRITE | REQ_DISCARD)) {
3925 num_stripes = map->num_stripes;
3926 } else if (mirror_num) {
3927 stripe_index = mirror_num - 1;
3928 } else {
3929 mirror_num = 1;
3930 }
3931
3932 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3933 int factor = map->num_stripes / map->sub_stripes;
3934
3935 stripe_index = do_div(stripe_nr, factor);
3936 stripe_index *= map->sub_stripes;
3937
3938 if (rw & REQ_WRITE)
3939 num_stripes = map->sub_stripes;
3940 else if (rw & REQ_DISCARD)
3941 num_stripes = min_t(u64, map->sub_stripes *
3942 (stripe_nr_end - stripe_nr_orig),
3943 map->num_stripes);
3944 else if (mirror_num)
3945 stripe_index += mirror_num - 1;
3946 else {
3947 int old_stripe_index = stripe_index;
3948 stripe_index = find_live_mirror(map, stripe_index,
3949 map->sub_stripes, stripe_index +
3950 current->pid % map->sub_stripes);
3951 mirror_num = stripe_index - old_stripe_index + 1;
3952 }
3953 } else {
3954 /*
3955 * after this do_div call, stripe_nr is the number of stripes
3956 * on this device we have to walk to find the data, and
3957 * stripe_index is the number of our device in the stripe array
3958 */
3959 stripe_index = do_div(stripe_nr, map->num_stripes);
3960 mirror_num = stripe_index + 1;
3961 }
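/*
 * Editorial worked example for the RAID10 branch above
 * (hypothetical numbers): with num_stripes = 4 and sub_stripes = 2,
 * factor = 2.  For stripe_nr = 5, do_div() leaves stripe_nr = 2 and
 * yields stripe_index = 1, scaled to 2 by sub_stripes: the block
 * lives on mirror pair {2, 3}, at physical stripe 2 of each device.
 */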
3962 BUG_ON(stripe_index >= map->num_stripes);
3963
3964 bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
3965 if (!bbio) {
3966 ret = -ENOMEM;
3967 goto out;
3968 }
3969 atomic_set(&bbio->error, 0);
3970
3971 if (rw & REQ_DISCARD) {
3972 int factor = 0;
3973 int sub_stripes = 0;
3974 u64 stripes_per_dev = 0;
3975 u32 remaining_stripes = 0;
3976 u32 last_stripe = 0;
3977
3978 if (map->type &
3979 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3980 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3981 sub_stripes = 1;
3982 else
3983 sub_stripes = map->sub_stripes;
3984
3985 factor = map->num_stripes / sub_stripes;
3986 stripes_per_dev = div_u64_rem(stripe_nr_end -
3987 stripe_nr_orig,
3988 factor,
3989 &remaining_stripes);
3990 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
3991 last_stripe *= sub_stripes;
3992 }
3993
3994 for (i = 0; i < num_stripes; i++) {
3995 bbio->stripes[i].physical =
3996 map->stripes[stripe_index].physical +
3997 stripe_offset + stripe_nr * map->stripe_len;
3998 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
3999
4000 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
4001 BTRFS_BLOCK_GROUP_RAID10)) {
4002 bbio->stripes[i].length = stripes_per_dev *
4003 map->stripe_len;
4004
4005 if (i / sub_stripes < remaining_stripes)
4006 bbio->stripes[i].length +=
4007 map->stripe_len;
4008
4009 /*
4010 * Special for the first stripe and
4011 * the last stripe:
4012 *
4013 * |-------|...|-------|
4014 *     |----------|
4015 *    off     end_off
4016 */
4017 if (i < sub_stripes)
4018 bbio->stripes[i].length -=
4019 stripe_offset;
4020
4021 if (stripe_index >= last_stripe &&
4022 stripe_index <= (last_stripe +
4023 sub_stripes - 1))
4024 bbio->stripes[i].length -=
4025 stripe_end_offset;
4026
4027 if (i == sub_stripes - 1)
4028 stripe_offset = 0;
4029 } else
4030 bbio->stripes[i].length = *length;
4031
4032 stripe_index++;
4033 if (stripe_index == map->num_stripes) {
4034 /* This could only happen for RAID0/10 */
4035 stripe_index = 0;
4036 stripe_nr++;
4037 }
4038 }
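/*
 * Editorial worked example for the discard loop above (hypothetical
 * numbers): RAID0 over two devices, stripe_len = 64K, discarding
 * 160K starting at offset 32K.  stripe_nr_orig = 0 and
 * stripe_nr_end = 3, so stripes_per_dev = 1 with
 * remaining_stripes = 1: device 0 covers logical stripes 0 and 2
 * minus the 32K head (96K), device 1 covers stripe 1 (64K),
 * 160K in total.
 */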
4039 } else {
4040 for (i = 0; i < num_stripes; i++) {
4041 bbio->stripes[i].physical =
4042 map->stripes[stripe_index].physical +
4043 stripe_offset +
4044 stripe_nr * map->stripe_len;
4045 bbio->stripes[i].dev =
4046 map->stripes[stripe_index].dev;
4047 stripe_index++;
4048 }
4049 }
4050
4051 if (rw & REQ_WRITE) {
4052 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4053 BTRFS_BLOCK_GROUP_RAID10 |
4054 BTRFS_BLOCK_GROUP_DUP)) {
4055 max_errors = 1;
4056 }
4057 }
4058
4059 *bbio_ret = bbio;
4060 bbio->num_stripes = num_stripes;
4061 bbio->max_errors = max_errors;
4062 bbio->mirror_num = mirror_num;
4063 out:
4064 free_extent_map(em);
4065 return ret;
4066 }
4067
4068 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4069 u64 logical, u64 *length,
4070 struct btrfs_bio **bbio_ret, int mirror_num)
4071 {
4072 return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
4073 mirror_num);
4074 }
4075
4076 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
4077 u64 chunk_start, u64 physical, u64 devid,
4078 u64 **logical, int *naddrs, int *stripe_len)
4079 {
4080 struct extent_map_tree *em_tree = &map_tree->map_tree;
4081 struct extent_map *em;
4082 struct map_lookup *map;
4083 u64 *buf;
4084 u64 bytenr;
4085 u64 length;
4086 u64 stripe_nr;
4087 int i, j, nr = 0;
4088
4089 read_lock(&em_tree->lock);
4090 em = lookup_extent_mapping(em_tree, chunk_start, 1);
4091 read_unlock(&em_tree->lock);
4092
4093 BUG_ON(!em || em->start != chunk_start);
4094 map = (struct map_lookup *)em->bdev;
4095
4096 length = em->len;
4097 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4098 do_div(length, map->num_stripes / map->sub_stripes);
4099 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4100 do_div(length, map->num_stripes);
4101
4102 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
4103 BUG_ON(!buf); /* -ENOMEM */
4104
4105 for (i = 0; i < map->num_stripes; i++) {
4106 if (devid && map->stripes[i].dev->devid != devid)
4107 continue;
4108 if (map->stripes[i].physical > physical ||
4109 map->stripes[i].physical + length <= physical)
4110 continue;
4111
4112 stripe_nr = physical - map->stripes[i].physical;
4113 do_div(stripe_nr, map->stripe_len);
4114
4115 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4116 stripe_nr = stripe_nr * map->num_stripes + i;
4117 do_div(stripe_nr, map->sub_stripes);
4118 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4119 stripe_nr = stripe_nr * map->num_stripes + i;
4120 }
4121 bytenr = chunk_start + stripe_nr * map->stripe_len;
4122 WARN_ON(nr >= map->num_stripes);
4123 for (j = 0; j < nr; j++) {
4124 if (buf[j] == bytenr)
4125 break;
4126 }
4127 if (j == nr) {
4128 WARN_ON(nr >= map->num_stripes);
4129 buf[nr++] = bytenr;
4130 }
4131 }
4132
4133 *logical = buf;
4134 *naddrs = nr;
4135 *stripe_len = map->stripe_len;
4136
4137 free_extent_map(em);
4138 return 0;
4139 }
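/*
 * Editorial worked example for btrfs_rmap_block() above
 * (hypothetical numbers): RAID0 over two devices, stripe_len = 64K.
 * A physical address 64K into device 1's part of the chunk gives
 * stripe_nr = 1, so the logical stripe is 1 * 2 + 1 = 3 and the
 * returned address is chunk_start + 192K.
 */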
4140
4141 static void *merge_stripe_index_into_bio_private(void *bi_private,
4142 unsigned int stripe_index)
4143 {
4144 /*
4145 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
4146 * at most 1.
4147 * The alternative solution (instead of stealing bits from the
4148 * pointer) would be to allocate an intermediate structure
4149 * that contains the old private pointer plus the stripe_index.
4150 */
4151 BUG_ON((((uintptr_t)bi_private) & 3) != 0);
4152 BUG_ON(stripe_index > 3);
4153 return (void *)(((uintptr_t)bi_private) | stripe_index);
4154 }
4155
4156 static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
4157 {
4158 return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
4159 }
4160
4161 static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
4162 {
4163 return (unsigned int)((uintptr_t)bi_private) & 3;
4164 }
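/*
 * Editorial sketch (not part of the original file): the round trip
 * that the three helpers above guarantee.  A btrfs_bio from
 * kzalloc() is at least 4-byte aligned, so the two low pointer bits
 * are free to carry the stripe index.
 */
static inline void __maybe_unused bio_private_pack_example(struct btrfs_bio *bbio)
{
void *packed = merge_stripe_index_into_bio_private(bbio, 2);

/* both halves come back out unchanged */
BUG_ON(extract_bbio_from_bio_private(packed) != bbio);
BUG_ON(extract_stripe_index_from_bio_private(packed) != 2);
}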
4165
4166 static void btrfs_end_bio(struct bio *bio, int err)
4167 {
4168 struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
4169 int is_orig_bio = 0;
4170
4171 if (err) {
4172 atomic_inc(&bbio->error);
4173 if (err == -EIO || err == -EREMOTEIO) {
4174 unsigned int stripe_index =
4175 extract_stripe_index_from_bio_private(
4176 bio->bi_private);
4177 struct btrfs_device *dev;
4178
4179 BUG_ON(stripe_index >= bbio->num_stripes);
4180 dev = bbio->stripes[stripe_index].dev;
4181 if (dev->bdev) {
4182 if (bio->bi_rw & WRITE)
4183 btrfs_dev_stat_inc(dev,
4184 BTRFS_DEV_STAT_WRITE_ERRS);
4185 else
4186 btrfs_dev_stat_inc(dev,
4187 BTRFS_DEV_STAT_READ_ERRS);
4188 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
4189 btrfs_dev_stat_inc(dev,
4190 BTRFS_DEV_STAT_FLUSH_ERRS);
4191 btrfs_dev_stat_print_on_error(dev);
4192 }
4193 }
4194 }
4195
4196 if (bio == bbio->orig_bio)
4197 is_orig_bio = 1;
4198
4199 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4200 if (!is_orig_bio) {
4201 bio_put(bio);
4202 bio = bbio->orig_bio;
4203 }
4204 bio->bi_private = bbio->private;
4205 bio->bi_end_io = bbio->end_io;
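/*
 * the real block device is no longer needed at this point; reuse
 * bi_bdev to carry the mirror number back to the original
 * completion handler (bbio_error() below does the same)
 */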
4206 bio->bi_bdev = (struct block_device *)
4207 (unsigned long)bbio->mirror_num;
4208 /* only send an error to the higher layers if it is
4209 * beyond the tolerance of the multi-bio
4210 */
4211 if (atomic_read(&bbio->error) > bbio->max_errors) {
4212 err = -EIO;
4213 } else {
4214 /*
4215 * this bio is actually up to date, we didn't
4216 * go over the max number of errors
4217 */
4218 set_bit(BIO_UPTODATE, &bio->bi_flags);
4219 err = 0;
4220 }
4221 kfree(bbio);
4222
4223 bio_endio(bio, err);
4224 } else if (!is_orig_bio) {
4225 bio_put(bio);
4226 }
4227 }
4228
4229 struct async_sched {
4230 struct bio *bio;
4231 int rw;
4232 struct btrfs_fs_info *info;
4233 struct btrfs_work work;
4234 };
4235
4236 /*
4237 * see run_scheduled_bios for a description of why bios are collected for
4238 * async submit.
4239 *
4240 * This will add one bio to the pending list for a device and make sure
4241 * the work struct is scheduled.
4242 */
4243 static noinline void schedule_bio(struct btrfs_root *root,
4244 struct btrfs_device *device,
4245 int rw, struct bio *bio)
4246 {
4247 int should_queue = 1;
4248 struct btrfs_pending_bios *pending_bios;
4249
4250 /* don't bother with additional async steps for reads, right now */
4251 if (!(rw & REQ_WRITE)) {
4252 bio_get(bio);
4253 btrfsic_submit_bio(rw, bio);
4254 bio_put(bio);
4255 return;
4256 }
4257
4258 /*
4259 * nr_async_bios allows us to reliably return congestion to the
4260 * higher layers. Otherwise, the async bio makes it appear we have
4261 * made progress against dirty pages when we've really just put it
4262 * on a queue for later
4263 */
4264 atomic_inc(&root->fs_info->nr_async_bios);
4265 WARN_ON(bio->bi_next);
4266 bio->bi_next = NULL;
4267 bio->bi_rw |= rw;
4268
4269 spin_lock(&device->io_lock);
4270 if (bio->bi_rw & REQ_SYNC)
4271 pending_bios = &device->pending_sync_bios;
4272 else
4273 pending_bios = &device->pending_bios;
4274
4275 if (pending_bios->tail)
4276 pending_bios->tail->bi_next = bio;
4277
4278 pending_bios->tail = bio;
4279 if (!pending_bios->head)
4280 pending_bios->head = bio;
4281 if (device->running_pending)
4282 should_queue = 0;
4283
4284 spin_unlock(&device->io_lock);
4285
4286 if (should_queue)
4287 btrfs_queue_worker(&root->fs_info->submit_workers,
4288 &device->work);
4289 }
4290
4291 static int bio_size_ok(struct block_device *bdev, struct bio *bio,
4292 sector_t sector)
4293 {
4294 struct bio_vec *prev;
4295 struct request_queue *q = bdev_get_queue(bdev);
4296 unsigned short max_sectors = queue_max_sectors(q);
4297 struct bvec_merge_data bvm = {
4298 .bi_bdev = bdev,
4299 .bi_sector = sector,
4300 .bi_rw = bio->bi_rw,
4301 };
4302
4303 if (bio->bi_vcnt == 0) {
4304 WARN_ON(1);
4305 return 1;
4306 }
4307
4308 prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
4309 if ((bio->bi_size >> 9) > max_sectors)
4310 return 0;
4311
4312 if (!q->merge_bvec_fn)
4313 return 1;
4314
4315 bvm.bi_size = bio->bi_size - prev->bv_len;
4316 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
4317 return 0;
4318 return 1;
4319 }
4320
4321 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
4322 struct bio *bio, u64 physical, int dev_nr,
4323 int rw, int async)
4324 {
4325 struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
4326
4327 bio->bi_private = bbio;
4328 bio->bi_private = merge_stripe_index_into_bio_private(
4329 bio->bi_private, (unsigned int)dev_nr);
4330 bio->bi_end_io = btrfs_end_bio;
4331 bio->bi_sector = physical >> 9;
4332 #ifdef DEBUG
4333 {
4334 struct rcu_string *name;
4335
4336 rcu_read_lock();
4337 name = rcu_dereference(dev->name);
4338 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
4339 "(%s id %llu), size=%u\n", rw,
4340 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4341 name->str, dev->devid, bio->bi_size);
4342 rcu_read_unlock();
4343 }
4344 #endif
4345 bio->bi_bdev = dev->bdev;
4346 if (async)
4347 schedule_bio(root, dev, rw, bio);
4348 else
4349 btrfsic_submit_bio(rw, bio);
4350 }
4351
4352 static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
4353 struct bio *first_bio, struct btrfs_device *dev,
4354 int dev_nr, int rw, int async)
4355 {
4356 struct bio_vec *bvec = first_bio->bi_io_vec;
4357 struct bio *bio;
4358 int nr_vecs = bio_get_nr_vecs(dev->bdev);
4359 u64 physical = bbio->stripes[dev_nr].physical;
4360
4361 again:
4362 bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
4363 if (!bio)
4364 return -ENOMEM;
4365
4366 while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
4367 if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
4368 bvec->bv_offset) < bvec->bv_len) {
4369 u64 len = bio->bi_size;
4370
4371 atomic_inc(&bbio->stripes_pending);
4372 submit_stripe_bio(root, bbio, bio, physical, dev_nr,
4373 rw, async);
4374 physical += len;
4375 goto again;
4376 }
4377 bvec++;
4378 }
4379
4380 submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
4381 return 0;
4382 }
4383
4384 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
4385 {
4386 atomic_inc(&bbio->error);
4387 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4388 bio->bi_private = bbio->private;
4389 bio->bi_end_io = bbio->end_io;
4390 bio->bi_bdev = (struct block_device *)
4391 (unsigned long)bbio->mirror_num;
4392 bio->bi_sector = logical >> 9;
4393 kfree(bbio);
4394 bio_endio(bio, -EIO);
4395 }
4396 }
4397
4398 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4399 int mirror_num, int async_submit)
4400 {
4401 struct btrfs_device *dev;
4402 struct bio *first_bio = bio;
4403 u64 logical = (u64)bio->bi_sector << 9;
4404 u64 length = 0;
4405 u64 map_length;
4406 int ret;
4407 int dev_nr = 0;
4408 int total_devs = 1;
4409 struct btrfs_bio *bbio = NULL;
4410
4411 length = bio->bi_size;
4412 map_length = length;
4413
4414 ret = btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
4415 mirror_num);
4416 if (ret) /* -ENOMEM */
4417 return ret;
4418
4419 total_devs = bbio->num_stripes;
4420 if (map_length < length) {
4421 printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len %llu "
4422 "len %llu\n", (unsigned long long)logical,
4423 (unsigned long long)length,
4424 (unsigned long long)map_length);
4425 BUG();
4426 }
4427
4428 bbio->orig_bio = first_bio;
4429 bbio->private = first_bio->bi_private;
4430 bbio->end_io = first_bio->bi_end_io;
4431 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
4432
4433 while (dev_nr < total_devs) {
4434 dev = bbio->stripes[dev_nr].dev;
4435 if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
4436 bbio_error(bbio, first_bio, logical);
4437 dev_nr++;
4438 continue;
4439 }
4440
4441 /*
4442 * Check and see if we're ok with this bio based on its size
4443 * and offset with the given device.
4444 */
4445 if (!bio_size_ok(dev->bdev, first_bio,
4446 bbio->stripes[dev_nr].physical >> 9)) {
4447 ret = breakup_stripe_bio(root, bbio, first_bio, dev,
4448 dev_nr, rw, async_submit);
4449 BUG_ON(ret);
4450 dev_nr++;
4451 continue;
4452 }
4453
4454 if (dev_nr < total_devs - 1) {
4455 bio = bio_clone(first_bio, GFP_NOFS);
4456 BUG_ON(!bio); /* -ENOMEM */
4457 } else {
4458 bio = first_bio;
4459 }
4460
4461 submit_stripe_bio(root, bbio, bio,
4462 bbio->stripes[dev_nr].physical, dev_nr, rw,
4463 async_submit);
4464 dev_nr++;
4465 }
4466 return 0;
4467 }
4468
4469 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
4470 u8 *uuid, u8 *fsid)
4471 {
4472 struct btrfs_device *device;
4473 struct btrfs_fs_devices *cur_devices;
4474
4475 cur_devices = fs_info->fs_devices;
4476 while (cur_devices) {
4477 if (!fsid ||
4478 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4479 device = __find_device(&cur_devices->devices,
4480 devid, uuid);
4481 if (device)
4482 return device;
4483 }
4484 cur_devices = cur_devices->seed;
4485 }
4486 return NULL;
4487 }
4488
4489 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
4490 u64 devid, u8 *dev_uuid)
4491 {
4492 struct btrfs_device *device;
4493 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4494
4495 device = kzalloc(sizeof(*device), GFP_NOFS);
4496 if (!device)
4497 return NULL;
4498 list_add(&device->dev_list,
4499 &fs_devices->devices);
4500 device->dev_root = root->fs_info->dev_root;
4501 device->devid = devid;
4502 device->work.func = pending_bios_fn;
4503 device->fs_devices = fs_devices;
4504 device->missing = 1;
4505 fs_devices->num_devices++;
4506 fs_devices->missing_devices++;
4507 spin_lock_init(&device->io_lock);
4508 INIT_LIST_HEAD(&device->dev_alloc_list);
4509 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
4510 return device;
4511 }
4512
4513 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4514 struct extent_buffer *leaf,
4515 struct btrfs_chunk *chunk)
4516 {
4517 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4518 struct map_lookup *map;
4519 struct extent_map *em;
4520 u64 logical;
4521 u64 length;
4522 u64 devid;
4523 u8 uuid[BTRFS_UUID_SIZE];
4524 int num_stripes;
4525 int ret;
4526 int i;
4527
4528 logical = key->offset;
4529 length = btrfs_chunk_length(leaf, chunk);
4530
4531 read_lock(&map_tree->map_tree.lock);
4532 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
4533 read_unlock(&map_tree->map_tree.lock);
4534
4535 /* already mapped? */
4536 if (em && em->start <= logical && em->start + em->len > logical) {
4537 free_extent_map(em);
4538 return 0;
4539 } else if (em) {
4540 free_extent_map(em);
4541 }
4542
4543 em = alloc_extent_map();
4544 if (!em)
4545 return -ENOMEM;
4546 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4547 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4548 if (!map) {
4549 free_extent_map(em);
4550 return -ENOMEM;
4551 }
4552
4553 em->bdev = (struct block_device *)map;
4554 em->start = logical;
4555 em->len = length;
4556 em->block_start = 0;
4557 em->block_len = em->len;
4558
4559 map->num_stripes = num_stripes;
4560 map->io_width = btrfs_chunk_io_width(leaf, chunk);
4561 map->io_align = btrfs_chunk_io_align(leaf, chunk);
4562 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4563 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4564 map->type = btrfs_chunk_type(leaf, chunk);
4565 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4566 for (i = 0; i < num_stripes; i++) {
4567 map->stripes[i].physical =
4568 btrfs_stripe_offset_nr(leaf, chunk, i);
4569 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
4570 read_extent_buffer(leaf, uuid, (unsigned long)
4571 btrfs_stripe_dev_uuid_nr(chunk, i),
4572 BTRFS_UUID_SIZE);
4573 map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
4574 uuid, NULL);
4575 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
4576 kfree(map);
4577 free_extent_map(em);
4578 return -EIO;
4579 }
4580 if (!map->stripes[i].dev) {
4581 map->stripes[i].dev =
4582 add_missing_dev(root, devid, uuid);
4583 if (!map->stripes[i].dev) {
4584 kfree(map);
4585 free_extent_map(em);
4586 return -EIO;
4587 }
4588 }
4589 map->stripes[i].dev->in_fs_metadata = 1;
4590 }
4591
4592 write_lock(&map_tree->map_tree.lock);
4593 ret = add_extent_mapping(&map_tree->map_tree, em);
4594 write_unlock(&map_tree->map_tree.lock);
4595 BUG_ON(ret); /* Tree corruption */
4596 free_extent_map(em);
4597
4598 return 0;
4599 }
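/*
 * Editorial note on the on-disk layout parsed above: a CHUNK_ITEM
 * key's offset is the chunk's logical start address, and the item
 * body is a fixed struct btrfs_chunk header followed by one
 * struct btrfs_stripe per stripe, which is why map_lookup_size()
 * and btrfs_chunk_item_size() both scale with num_stripes.
 */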
4600
4601 static void fill_device_from_item(struct extent_buffer *leaf,
4602 struct btrfs_dev_item *dev_item,
4603 struct btrfs_device *device)
4604 {
4605 unsigned long ptr;
4606
4607 device->devid = btrfs_device_id(leaf, dev_item);
4608 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
4609 device->total_bytes = device->disk_total_bytes;
4610 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
4611 device->type = btrfs_device_type(leaf, dev_item);
4612 device->io_align = btrfs_device_io_align(leaf, dev_item);
4613 device->io_width = btrfs_device_io_width(leaf, dev_item);
4614 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
4615
4616 ptr = (unsigned long)btrfs_device_uuid(dev_item);
4617 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
4618 }
4619
4620 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
4621 {
4622 struct btrfs_fs_devices *fs_devices;
4623 int ret;
4624
4625 BUG_ON(!mutex_is_locked(&uuid_mutex));
4626
4627 fs_devices = root->fs_info->fs_devices->seed;
4628 while (fs_devices) {
4629 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4630 ret = 0;
4631 goto out;
4632 }
4633 fs_devices = fs_devices->seed;
4634 }
4635
4636 fs_devices = find_fsid(fsid);
4637 if (!fs_devices) {
4638 ret = -ENOENT;
4639 goto out;
4640 }
4641
4642 fs_devices = clone_fs_devices(fs_devices);
4643 if (IS_ERR(fs_devices)) {
4644 ret = PTR_ERR(fs_devices);
4645 goto out;
4646 }
4647
4648 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
4649 root->fs_info->bdev_holder);
4650 if (ret) {
4651 free_fs_devices(fs_devices);
4652 goto out;
4653 }
4654
4655 if (!fs_devices->seeding) {
4656 __btrfs_close_devices(fs_devices);
4657 free_fs_devices(fs_devices);
4658 ret = -EINVAL;
4659 goto out;
4660 }
4661
4662 fs_devices->seed = root->fs_info->fs_devices->seed;
4663 root->fs_info->fs_devices->seed = fs_devices;
4664 out:
4665 return ret;
4666 }
4667
4668 static int read_one_dev(struct btrfs_root *root,
4669 struct extent_buffer *leaf,
4670 struct btrfs_dev_item *dev_item)
4671 {
4672 struct btrfs_device *device;
4673 u64 devid;
4674 int ret;
4675 u8 fs_uuid[BTRFS_UUID_SIZE];
4676 u8 dev_uuid[BTRFS_UUID_SIZE];
4677
4678 devid = btrfs_device_id(leaf, dev_item);
4679 read_extent_buffer(leaf, dev_uuid,
4680 (unsigned long)btrfs_device_uuid(dev_item),
4681 BTRFS_UUID_SIZE);
4682 read_extent_buffer(leaf, fs_uuid,
4683 (unsigned long)btrfs_device_fsid(dev_item),
4684 BTRFS_UUID_SIZE);
4685
4686 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
4687 ret = open_seed_devices(root, fs_uuid);
4688 if (ret && !btrfs_test_opt(root, DEGRADED))
4689 return ret;
4690 }
4691
4692 device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
4693 if (!device || !device->bdev) {
4694 if (!btrfs_test_opt(root, DEGRADED))
4695 return -EIO;
4696
4697 if (!device) {
4698 printk(KERN_WARNING "warning devid %llu missing\n",
4699 (unsigned long long)devid);
4700 device = add_missing_dev(root, devid, dev_uuid);
4701 if (!device)
4702 return -ENOMEM;
4703 } else if (!device->missing) {
4704 /*
4705 * this happens when a device that was properly set up
4706 * in the device info lists suddenly goes bad.
4707 * device->bdev is NULL, and so we have to set
4708 * device->missing to one here
4709 */
4710 root->fs_info->fs_devices->missing_devices++;
4711 device->missing = 1;
4712 }
4713 }
4714
4715 if (device->fs_devices != root->fs_info->fs_devices) {
4716 BUG_ON(device->writeable);
4717 if (device->generation !=
4718 btrfs_device_generation(leaf, dev_item))
4719 return -EINVAL;
4720 }
4721
4722 fill_device_from_item(leaf, dev_item, device);
4723 device->dev_root = root->fs_info->dev_root;
4724 device->in_fs_metadata = 1;
4725 if (device->writeable) {
4726 device->fs_devices->total_rw_bytes += device->total_bytes;
4727 spin_lock(&root->fs_info->free_chunk_lock);
4728 root->fs_info->free_chunk_space += device->total_bytes -
4729 device->bytes_used;
4730 spin_unlock(&root->fs_info->free_chunk_lock);
4731 }
4732 ret = 0;
4733 return ret;
4734 }
4735
4736 int btrfs_read_sys_array(struct btrfs_root *root)
4737 {
4738 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4739 struct extent_buffer *sb;
4740 struct btrfs_disk_key *disk_key;
4741 struct btrfs_chunk *chunk;
4742 u8 *ptr;
4743 unsigned long sb_ptr;
4744 int ret = 0;
4745 u32 num_stripes;
4746 u32 array_size;
4747 u32 len = 0;
4748 u32 cur;
4749 struct btrfs_key key;
4750
4751 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
4752 BTRFS_SUPER_INFO_SIZE);
4753 if (!sb)
4754 return -ENOMEM;
4755 btrfs_set_buffer_uptodate(sb);
4756 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
4757 /*
4758 * The sb extent buffer is artificial and only used to read the system array.
4759 * The btrfs_set_buffer_uptodate() call does not properly mark all its
4760 * pages up-to-date when the page is larger: extent does not cover the
4761 * whole page and consequently check_page_uptodate does not find all
4762 * the page's extents up-to-date (the hole beyond sb),
4763 * write_extent_buffer then triggers a WARN_ON.
4764 *
4765 * Regular short extents go through the mark_extent_buffer_dirty/writeback cycle,
4766 * but sb spans only this function. Add an explicit SetPageUptodate call
4767 * to silence the warning, e.g. on PowerPC 64.
4768 */
4769 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
4770 SetPageUptodate(sb->pages[0]);
4771
4772 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
4773 array_size = btrfs_super_sys_array_size(super_copy);
4774
4775 ptr = super_copy->sys_chunk_array;
4776 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
4777 cur = 0;
4778
4779 while (cur < array_size) {
4780 disk_key = (struct btrfs_disk_key *)ptr;
4781 btrfs_disk_key_to_cpu(&key, disk_key);
4782
4783 len = sizeof(*disk_key); ptr += len;
4784 sb_ptr += len;
4785 cur += len;
4786
4787 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
4788 chunk = (struct btrfs_chunk *)sb_ptr;
4789 ret = read_one_chunk(root, &key, sb, chunk);
4790 if (ret)
4791 break;
4792 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
4793 len = btrfs_chunk_item_size(num_stripes);
4794 } else {
4795 ret = -EIO;
4796 break;
4797 }
4798 ptr += len;
4799 sb_ptr += len;
4800 cur += len;
4801 }
4802 free_extent_buffer(sb);
4803 return ret;
4804 }
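/*
 * Editorial sketch of the sys_chunk_array layout walked above:
 *
 * | disk_key | chunk + stripes | disk_key | chunk + stripes | ...
 *
 * each iteration consumes one fixed-size key plus, for CHUNK_ITEM
 * keys, one variable-size chunk item of
 * btrfs_chunk_item_size(num_stripes) bytes.
 */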
4805
4806 int btrfs_read_chunk_tree(struct btrfs_root *root)
4807 {
4808 struct btrfs_path *path;
4809 struct extent_buffer *leaf;
4810 struct btrfs_key key;
4811 struct btrfs_key found_key;
4812 int ret;
4813 int slot;
4814
4815 root = root->fs_info->chunk_root;
4816
4817 path = btrfs_alloc_path();
4818 if (!path)
4819 return -ENOMEM;
4820
4821 mutex_lock(&uuid_mutex);
4822 lock_chunks(root);
4823
4824 /* first we search for all of the device items, and then we
4825 * read in all of the chunk items. This way we can create chunk
4826 * mappings that reference all of the devices that are found
4827 */
4828 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
4829 key.offset = 0;
4830 key.type = 0;
4831 again:
4832 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4833 if (ret < 0)
4834 goto error;
4835 while (1) {
4836 leaf = path->nodes[0];
4837 slot = path->slots[0];
4838 if (slot >= btrfs_header_nritems(leaf)) {
4839 ret = btrfs_next_leaf(root, path);
4840 if (ret == 0)
4841 continue;
4842 if (ret < 0)
4843 goto error;
4844 break;
4845 }
4846 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4847 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4848 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
4849 break;
4850 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
4851 struct btrfs_dev_item *dev_item;
4852 dev_item = btrfs_item_ptr(leaf, slot,
4853 struct btrfs_dev_item);
4854 ret = read_one_dev(root, leaf, dev_item);
4855 if (ret)
4856 goto error;
4857 }
4858 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
4859 struct btrfs_chunk *chunk;
4860 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4861 ret = read_one_chunk(root, &found_key, leaf, chunk);
4862 if (ret)
4863 goto error;
4864 }
4865 path->slots[0]++;
4866 }
4867 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4868 key.objectid = 0;
4869 btrfs_release_path(path);
4870 goto again;
4871 }
4872 ret = 0;
4873 error:
4874 unlock_chunks(root);
4875 mutex_unlock(&uuid_mutex);
4876
4877 btrfs_free_path(path);
4878 return ret;
4879 }
4880
4881 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
4882 {
4883 int i;
4884
4885 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4886 btrfs_dev_stat_reset(dev, i);
4887 }
4888
4889 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
4890 {
4891 struct btrfs_key key;
4892 struct btrfs_key found_key;
4893 struct btrfs_root *dev_root = fs_info->dev_root;
4894 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
4895 struct extent_buffer *eb;
4896 int slot;
4897 int ret = 0;
4898 struct btrfs_device *device;
4899 struct btrfs_path *path = NULL;
4900 int i;
4901
4902 path = btrfs_alloc_path();
4903 if (!path) {
4904 ret = -ENOMEM;
4905 goto out;
4906 }
4907
4908 mutex_lock(&fs_devices->device_list_mutex);
4909 list_for_each_entry(device, &fs_devices->devices, dev_list) {
4910 int item_size;
4911 struct btrfs_dev_stats_item *ptr;
4912
4913 key.objectid = 0;
4914 key.type = BTRFS_DEV_STATS_KEY;
4915 key.offset = device->devid;
4916 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
4917 if (ret) {
4918 __btrfs_reset_dev_stats(device);
4919 device->dev_stats_valid = 1;
4920 btrfs_release_path(path);
4921 continue;
4922 }
4923 slot = path->slots[0];
4924 eb = path->nodes[0];
4925 btrfs_item_key_to_cpu(eb, &found_key, slot);
4926 item_size = btrfs_item_size_nr(eb, slot);
4927
4928 ptr = btrfs_item_ptr(eb, slot,
4929 struct btrfs_dev_stats_item);
4930
4931 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
4932 if (item_size >= (1 + i) * sizeof(__le64))
4933 btrfs_dev_stat_set(device, i,
4934 btrfs_dev_stats_value(eb, ptr, i));
4935 else
4936 btrfs_dev_stat_reset(device, i);
4937 }
4938
4939 device->dev_stats_valid = 1;
4940 btrfs_dev_stat_print_on_load(device);
4941 btrfs_release_path(path);
4942 }
4943 mutex_unlock(&fs_devices->device_list_mutex);
4944
4945 out:
4946 btrfs_free_path(path);
4947 return ret < 0 ? ret : 0;
4948 }
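/*
 * Editorial note: the dev_stats item is keyed
 * (0, BTRFS_DEV_STATS_KEY, devid) and its body is just an array of
 * __le64 counters, which is why the loop above gates each read on
 * item_size: an older, shorter item simply leaves the newer
 * counters reset to zero.
 */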
4949
4950 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
4951 struct btrfs_root *dev_root,
4952 struct btrfs_device *device)
4953 {
4954 struct btrfs_path *path;
4955 struct btrfs_key key;
4956 struct extent_buffer *eb;
4957 struct btrfs_dev_stats_item *ptr;
4958 int ret;
4959 int i;
4960
4961 key.objectid = 0;
4962 key.type = BTRFS_DEV_STATS_KEY;
4963 key.offset = device->devid;
4964
4965 path = btrfs_alloc_path();
4966 BUG_ON(!path);
4967 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
4968 if (ret < 0) {
4969 printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
4970 ret, rcu_str_deref(device->name));
4971 goto out;
4972 }
4973
4974 if (ret == 0 &&
4975 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
4976 /* need to delete old one and insert a new one */
4977 ret = btrfs_del_item(trans, dev_root, path);
4978 if (ret != 0) {
4979 printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
4980 rcu_str_deref(device->name), ret);
4981 goto out;
4982 }
4983 ret = 1;
4984 }
4985
4986 if (ret == 1) {
4987 /* need to insert a new item */
4988 btrfs_release_path(path);
4989 ret = btrfs_insert_empty_item(trans, dev_root, path,
4990 &key, sizeof(*ptr));
4991 if (ret < 0) {
4992 printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
4993 rcu_str_deref(device->name), ret);
4994 goto out;
4995 }
4996 }
4997
4998 eb = path->nodes[0];
4999 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
5000 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5001 btrfs_set_dev_stats_value(eb, ptr, i,
5002 btrfs_dev_stat_read(device, i));
5003 btrfs_mark_buffer_dirty(eb);
5004
5005 out:
5006 btrfs_free_path(path);
5007 return ret;
5008 }
5009
5010 /*
5011 * called from commit_transaction. Writes all changed device stats to disk.
5012 */
5013 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
5014 struct btrfs_fs_info *fs_info)
5015 {
5016 struct btrfs_root *dev_root = fs_info->dev_root;
5017 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
5018 struct btrfs_device *device;
5019 int ret = 0;
5020
5021 mutex_lock(&fs_devices->device_list_mutex);
5022 list_for_each_entry(device, &fs_devices->devices, dev_list) {
5023 if (!device->dev_stats_valid || !device->dev_stats_dirty)
5024 continue;
5025
5026 ret = update_dev_stat_item(trans, dev_root, device);
5027 if (!ret)
5028 device->dev_stats_dirty = 0;
5029 }
5030 mutex_unlock(&fs_devices->device_list_mutex);
5031
5032 return ret;
5033 }
5034
5035 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
5036 {
5037 btrfs_dev_stat_inc(dev, index);
5038 btrfs_dev_stat_print_on_error(dev);
5039 }
5040
5041 void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
5042 {
5043 if (!dev->dev_stats_valid)
5044 return;
5045 printk_ratelimited_in_rcu(KERN_ERR
5046 "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
5047 rcu_str_deref(dev->name),
5048 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
5049 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
5050 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
5051 btrfs_dev_stat_read(dev,
5052 BTRFS_DEV_STAT_CORRUPTION_ERRS),
5053 btrfs_dev_stat_read(dev,
5054 BTRFS_DEV_STAT_GENERATION_ERRS));
5055 }
5056
5057 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
5058 {
5059 int i;
5060
5061 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5062 if (btrfs_dev_stat_read(dev, i) != 0)
5063 break;
5064 if (i == BTRFS_DEV_STAT_VALUES_MAX)
5065 return; /* all values == 0, suppress message */
5066
5067 printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
5068 rcu_str_deref(dev->name),
5069 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
5070 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
5071 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
5072 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
5073 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
5074 }
5075
5076 int btrfs_get_dev_stats(struct btrfs_root *root,
5077 struct btrfs_ioctl_get_dev_stats *stats)
5078 {
5079 struct btrfs_device *dev;
5080 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
5081 int i;
5082
5083 mutex_lock(&fs_devices->device_list_mutex);
5084 dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
5085 mutex_unlock(&fs_devices->device_list_mutex);
5086
5087 if (!dev) {
5088 printk(KERN_WARNING
5089 "btrfs: get dev_stats failed, device not found\n");
5090 return -ENODEV;
5091 } else if (!dev->dev_stats_valid) {
5092 printk(KERN_WARNING
5093 "btrfs: get dev_stats failed, not yet valid\n");
5094 return -ENODEV;
5095 } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
5096 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
5097 if (stats->nr_items > i)
5098 stats->values[i] =
5099 btrfs_dev_stat_read_and_reset(dev, i);
5100 else
5101 btrfs_dev_stat_reset(dev, i);
5102 }
5103 } else {
5104 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5105 if (stats->nr_items > i)
5106 stats->values[i] = btrfs_dev_stat_read(dev, i);
5107 }
5108 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
5109 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
5110 return 0;
5111 }
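/*
 * Editorial sketch of the matching userspace call (hypothetical fd
 * and devid; the fields mirror the code above):
 *
 *	struct btrfs_ioctl_get_dev_stats st = {
 *		.devid    = 1,
 *		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
 *		.flags    = 0,	(or BTRFS_DEV_STATS_RESET)
 *	};
 *	if (ioctl(fs_fd, BTRFS_IOC_GET_DEV_STATS, &st) == 0)
 *		printf("write errs: %llu\n",
 *		       (unsigned long long)st.values[BTRFS_DEV_STAT_WRITE_ERRS]);
 */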
5112
5113 int btrfs_scratch_superblock(struct btrfs_device *device)
5114 {
5115 struct buffer_head *bh;
5116 struct btrfs_super_block *disk_super;
5117
5118 bh = btrfs_read_dev_super(device->bdev);
5119 if (!bh)
5120 return -EINVAL;
5121 disk_super = (struct btrfs_super_block *)bh->b_data;
5122
5123 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
5124 set_buffer_dirty(bh);
5125 sync_dirty_buffer(bh);
5126 brelse(bh);
5127
5128 return 0;
5129 }