btrfs: Notify udev when removing device
fs/btrfs/volumes.c
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/slab.h>
21 #include <linux/buffer_head.h>
22 #include <linux/blkdev.h>
23 #include <linux/random.h>
24 #include <linux/iocontext.h>
25 #include <linux/capability.h>
26 #include <linux/ratelimit.h>
27 #include <linux/kthread.h>
28 #include "compat.h"
29 #include "ctree.h"
30 #include "extent_map.h"
31 #include "disk-io.h"
32 #include "transaction.h"
33 #include "print-tree.h"
34 #include "volumes.h"
35 #include "async-thread.h"
36 #include "check-integrity.h"
37 #include "rcu-string.h"
38 #include "math.h"
39 #include "dev-replace.h"
40
41 static int init_first_rw_device(struct btrfs_trans_handle *trans,
42 struct btrfs_root *root,
43 struct btrfs_device *device);
44 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
45 static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
46 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
47
48 static DEFINE_MUTEX(uuid_mutex);
49 static LIST_HEAD(fs_uuids);
50
51 static void lock_chunks(struct btrfs_root *root)
52 {
53 mutex_lock(&root->fs_info->chunk_mutex);
54 }
55
56 static void unlock_chunks(struct btrfs_root *root)
57 {
58 mutex_unlock(&root->fs_info->chunk_mutex);
59 }
60
61 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
62 {
63 struct btrfs_device *device;
64 WARN_ON(fs_devices->opened);
65 while (!list_empty(&fs_devices->devices)) {
66 device = list_entry(fs_devices->devices.next,
67 struct btrfs_device, dev_list);
68 list_del(&device->dev_list);
69 rcu_string_free(device->name);
70 kfree(device);
71 }
72 kfree(fs_devices);
73 }
74
75 static void btrfs_kobject_uevent(struct block_device *bdev,
76 enum kobject_action action)
77 {
78 int ret;
79
80 ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
81 if (ret)
82 pr_warn("Sending event '%d' to kobject: '%s' (%p): failed\n",
83 action,
84 kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
85 &disk_to_dev(bdev->bd_disk)->kobj);
86 }
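/*
 * Illustrative use of the helper above: the device-removal path at the
 * end of btrfs_rm_device() calls it with KOBJ_CHANGE after wiping the
 * superblock, so that udev re-probes the device and drops the stale
 * btrfs signature from its database:
 *
 *	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
 */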
87
88 void btrfs_cleanup_fs_uuids(void)
89 {
90 struct btrfs_fs_devices *fs_devices;
91
92 while (!list_empty(&fs_uuids)) {
93 fs_devices = list_entry(fs_uuids.next,
94 struct btrfs_fs_devices, list);
95 list_del(&fs_devices->list);
96 free_fs_devices(fs_devices);
97 }
98 }
99
100 static noinline struct btrfs_device *__find_device(struct list_head *head,
101 u64 devid, u8 *uuid)
102 {
103 struct btrfs_device *dev;
104
105 list_for_each_entry(dev, head, dev_list) {
106 if (dev->devid == devid &&
107 (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
108 return dev;
109 }
110 }
111 return NULL;
112 }
113
114 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
115 {
116 struct btrfs_fs_devices *fs_devices;
117
118 list_for_each_entry(fs_devices, &fs_uuids, list) {
119 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
120 return fs_devices;
121 }
122 return NULL;
123 }
124
125 static int
126 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
127 int flush, struct block_device **bdev,
128 struct buffer_head **bh)
129 {
130 int ret;
131
132 *bdev = blkdev_get_by_path(device_path, flags, holder);
133
134 if (IS_ERR(*bdev)) {
135 ret = PTR_ERR(*bdev);
136 printk(KERN_INFO "btrfs: open %s failed\n", device_path);
137 goto error;
138 }
139
140 if (flush)
141 filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
142 ret = set_blocksize(*bdev, 4096);
143 if (ret) {
144 blkdev_put(*bdev, flags);
145 goto error;
146 }
147 invalidate_bdev(*bdev);
148 *bh = btrfs_read_dev_super(*bdev);
149 if (!*bh) {
150 ret = -EINVAL;
151 blkdev_put(*bdev, flags);
152 goto error;
153 }
154
155 return 0;
156
157 error:
158 *bdev = NULL;
159 *bh = NULL;
160 return ret;
161 }
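/*
 * A condensed caller sketch for btrfs_get_bdev_and_sb() (see
 * btrfs_scan_one_device() and btrfs_rm_device() below for the real
 * call sites): on success both the bdev and the super block buffer
 * must be released by the caller.
 *
 *	struct block_device *bdev;
 *	struct buffer_head *bh;
 *	struct btrfs_super_block *disk_super;
 *
 *	ret = btrfs_get_bdev_and_sb(path, FMODE_READ | FMODE_EXCL,
 *				    holder, 0, &bdev, &bh);
 *	if (ret)
 *		return ret;
 *	disk_super = (struct btrfs_super_block *)bh->b_data;
 *	...
 *	brelse(bh);
 *	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
 */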
162
163 static void requeue_list(struct btrfs_pending_bios *pending_bios,
164 struct bio *head, struct bio *tail)
165 {
166
167 struct bio *old_head;
168
169 old_head = pending_bios->head;
170 pending_bios->head = head;
171 if (pending_bios->tail)
172 tail->bi_next = old_head;
173 else
174 pending_bios->tail = tail;
175 }
176
177 /*
178 * we try to collect pending bios for a device so we don't get a large
179 * number of procs sending bios down to the same device. This greatly
180 * improves the scheduler's ability to collect and merge the bios.
181 *
182 * But, it also turns into a long list of bios to process and that is sure
183 * to eventually make the worker thread block. The solution here is to
184 * make some progress and then put this work struct back at the end of
185 * the list if the block device is congested. This way, multiple devices
186 * can make progress from a single worker thread.
187 */
188 static noinline void run_scheduled_bios(struct btrfs_device *device)
189 {
190 struct bio *pending;
191 struct backing_dev_info *bdi;
192 struct btrfs_fs_info *fs_info;
193 struct btrfs_pending_bios *pending_bios;
194 struct bio *tail;
195 struct bio *cur;
196 int again = 0;
197 unsigned long num_run;
198 unsigned long batch_run = 0;
199 unsigned long limit;
200 unsigned long last_waited = 0;
201 int force_reg = 0;
202 int sync_pending = 0;
203 struct blk_plug plug;
204
205 /*
206 * this function runs all the bios we've collected for
207 * a particular device. We don't want to wander off to
208 * another device without first sending all of these down.
209 * So, set up a plug here and finish it off before we return
210 */
211 blk_start_plug(&plug);
212
213 bdi = blk_get_backing_dev_info(device->bdev);
214 fs_info = device->dev_root->fs_info;
215 limit = btrfs_async_submit_limit(fs_info);
216 limit = limit * 2 / 3;
217
218 loop:
219 spin_lock(&device->io_lock);
220
221 loop_lock:
222 num_run = 0;
223
224 /* take all the bios off the list at once and process them
225 * later on (without the lock held). But, remember the
226 * tail and other pointers so the bios can be properly reinserted
227 * into the list if we hit congestion
228 */
229 if (!force_reg && device->pending_sync_bios.head) {
230 pending_bios = &device->pending_sync_bios;
231 force_reg = 1;
232 } else {
233 pending_bios = &device->pending_bios;
234 force_reg = 0;
235 }
236
237 pending = pending_bios->head;
238 tail = pending_bios->tail;
239 WARN_ON(pending && !tail);
240
241 /*
242 * if pending was null this time around, no bios need processing
243 * at all and we can stop. Otherwise it'll loop back up again
244 * and do an additional check so no bios are missed.
245 *
246 * device->running_pending is used to synchronize with the
247 * schedule_bio code.
248 */
249 if (device->pending_sync_bios.head == NULL &&
250 device->pending_bios.head == NULL) {
251 again = 0;
252 device->running_pending = 0;
253 } else {
254 again = 1;
255 device->running_pending = 1;
256 }
257
258 pending_bios->head = NULL;
259 pending_bios->tail = NULL;
260
261 spin_unlock(&device->io_lock);
262
263 while (pending) {
264
265 rmb();
266 /* we want to work on both lists, but do more bios on the
267 * sync list than the regular list
268 */
269 if ((num_run > 32 &&
270 pending_bios != &device->pending_sync_bios &&
271 device->pending_sync_bios.head) ||
272 (num_run > 64 && pending_bios == &device->pending_sync_bios &&
273 device->pending_bios.head)) {
274 spin_lock(&device->io_lock);
275 requeue_list(pending_bios, pending, tail);
276 goto loop_lock;
277 }
278
279 cur = pending;
280 pending = pending->bi_next;
281 cur->bi_next = NULL;
282
283 if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
284 waitqueue_active(&fs_info->async_submit_wait))
285 wake_up(&fs_info->async_submit_wait);
286
287 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
288
289 /*
290 * if we're doing the sync list, record that our
291 * plug has some sync requests on it
292 *
293 * If we're doing the regular list and there are
294 * sync requests sitting around, unplug before
295 * we add more
296 */
297 if (pending_bios == &device->pending_sync_bios) {
298 sync_pending = 1;
299 } else if (sync_pending) {
300 blk_finish_plug(&plug);
301 blk_start_plug(&plug);
302 sync_pending = 0;
303 }
304
305 btrfsic_submit_bio(cur->bi_rw, cur);
306 num_run++;
307 batch_run++;
308 if (need_resched())
309 cond_resched();
310
311 /*
312 * we made progress, there is more work to do and the bdi
313 * is now congested. Back off and let other work structs
314 * run instead
315 */
316 if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
317 fs_info->fs_devices->open_devices > 1) {
318 struct io_context *ioc;
319
320 ioc = current->io_context;
321
322 /*
323 * the main goal here is that we don't want to
324 * block if we're going to be able to submit
325 * more requests without blocking.
326 *
327 * This code does two great things, it pokes into
328 * the elevator code from a filesystem _and_
329 * it makes assumptions about how batching works.
330 */
331 if (ioc && ioc->nr_batch_requests > 0 &&
332 time_before(jiffies, ioc->last_waited + HZ/50UL) &&
333 (last_waited == 0 ||
334 ioc->last_waited == last_waited)) {
335 /*
336 * we want to go through our batch of
337 * requests and stop. So, we copy out
338 * the ioc->last_waited time and test
339 * against it before looping
340 */
341 last_waited = ioc->last_waited;
342 if (need_resched())
343 cond_resched();
344 continue;
345 }
346 spin_lock(&device->io_lock);
347 requeue_list(pending_bios, pending, tail);
348 device->running_pending = 1;
349
350 spin_unlock(&device->io_lock);
351 btrfs_requeue_work(&device->work);
352 goto done;
353 }
354 /* unplug every 64 requests just for good measure */
355 if (batch_run % 64 == 0) {
356 blk_finish_plug(&plug);
357 blk_start_plug(&plug);
358 sync_pending = 0;
359 }
360 }
361
362 cond_resched();
363 if (again)
364 goto loop;
365
366 spin_lock(&device->io_lock);
367 if (device->pending_bios.head || device->pending_sync_bios.head)
368 goto loop_lock;
369 spin_unlock(&device->io_lock);
370
371 done:
372 blk_finish_plug(&plug);
373 }
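/*
 * Condensed control flow of run_scheduled_bios(), matching the code
 * above:
 *
 *	loop:
 *		take io_lock, detach the sync or the regular list
 *		drop io_lock
 *		for each detached bio:
 *			requeue and restart if the other list is starving
 *			submit it via btrfsic_submit_bio()
 *			if the bdi got congested and progress was made,
 *				requeue the rest, btrfs_requeue_work(), done
 *		if new bios arrived meanwhile (again), goto loop
 */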
374
375 static void pending_bios_fn(struct btrfs_work *work)
376 {
377 struct btrfs_device *device;
378
379 device = container_of(work, struct btrfs_device, work);
380 run_scheduled_bios(device);
381 }
382
383 static noinline int device_list_add(const char *path,
384 struct btrfs_super_block *disk_super,
385 u64 devid, struct btrfs_fs_devices **fs_devices_ret)
386 {
387 struct btrfs_device *device;
388 struct btrfs_fs_devices *fs_devices;
389 struct rcu_string *name;
390 u64 found_transid = btrfs_super_generation(disk_super);
391
392 fs_devices = find_fsid(disk_super->fsid);
393 if (!fs_devices) {
394 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
395 if (!fs_devices)
396 return -ENOMEM;
397 INIT_LIST_HEAD(&fs_devices->devices);
398 INIT_LIST_HEAD(&fs_devices->alloc_list);
399 list_add(&fs_devices->list, &fs_uuids);
400 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
401 fs_devices->latest_devid = devid;
402 fs_devices->latest_trans = found_transid;
403 mutex_init(&fs_devices->device_list_mutex);
404 device = NULL;
405 } else {
406 device = __find_device(&fs_devices->devices, devid,
407 disk_super->dev_item.uuid);
408 }
409 if (!device) {
410 if (fs_devices->opened)
411 return -EBUSY;
412
413 device = kzalloc(sizeof(*device), GFP_NOFS);
414 if (!device) {
415 /* we can safely leave the fs_devices entry around */
416 return -ENOMEM;
417 }
418 device->devid = devid;
419 device->dev_stats_valid = 0;
420 device->work.func = pending_bios_fn;
421 memcpy(device->uuid, disk_super->dev_item.uuid,
422 BTRFS_UUID_SIZE);
423 spin_lock_init(&device->io_lock);
424
425 name = rcu_string_strdup(path, GFP_NOFS);
426 if (!name) {
427 kfree(device);
428 return -ENOMEM;
429 }
430 rcu_assign_pointer(device->name, name);
431 INIT_LIST_HEAD(&device->dev_alloc_list);
432
433 /* init readahead state */
434 spin_lock_init(&device->reada_lock);
435 device->reada_curr_zone = NULL;
436 atomic_set(&device->reada_in_flight, 0);
437 device->reada_next = 0;
438 INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
439 INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);
440
441 mutex_lock(&fs_devices->device_list_mutex);
442 list_add_rcu(&device->dev_list, &fs_devices->devices);
443 mutex_unlock(&fs_devices->device_list_mutex);
444
445 device->fs_devices = fs_devices;
446 fs_devices->num_devices++;
447 } else if (!device->name || strcmp(device->name->str, path)) {
448 name = rcu_string_strdup(path, GFP_NOFS);
449 if (!name)
450 return -ENOMEM;
451 rcu_string_free(device->name);
452 rcu_assign_pointer(device->name, name);
453 if (device->missing) {
454 fs_devices->missing_devices--;
455 device->missing = 0;
456 }
457 }
458
459 if (found_transid > fs_devices->latest_trans) {
460 fs_devices->latest_devid = devid;
461 fs_devices->latest_trans = found_transid;
462 }
463 *fs_devices_ret = fs_devices;
464 return 0;
465 }
466
467 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
468 {
469 struct btrfs_fs_devices *fs_devices;
470 struct btrfs_device *device;
471 struct btrfs_device *orig_dev;
472
473 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
474 if (!fs_devices)
475 return ERR_PTR(-ENOMEM);
476
477 INIT_LIST_HEAD(&fs_devices->devices);
478 INIT_LIST_HEAD(&fs_devices->alloc_list);
479 INIT_LIST_HEAD(&fs_devices->list);
480 mutex_init(&fs_devices->device_list_mutex);
481 fs_devices->latest_devid = orig->latest_devid;
482 fs_devices->latest_trans = orig->latest_trans;
483 fs_devices->total_devices = orig->total_devices;
484 memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
485
486 /* We hold the volume lock, so it is safe to get the devices. */
487 list_for_each_entry(orig_dev, &orig->devices, dev_list) {
488 struct rcu_string *name;
489
490 device = kzalloc(sizeof(*device), GFP_NOFS);
491 if (!device)
492 goto error;
493
494 /*
495 * This is ok to do without rcu read locked because we hold the
496 * uuid mutex so nothing we touch in here is going to disappear.
497 */
498 name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
499 if (!name) {
500 kfree(device);
501 goto error;
502 }
503 rcu_assign_pointer(device->name, name);
504
505 device->devid = orig_dev->devid;
506 device->work.func = pending_bios_fn;
507 memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
508 spin_lock_init(&device->io_lock);
509 INIT_LIST_HEAD(&device->dev_list);
510 INIT_LIST_HEAD(&device->dev_alloc_list);
511
512 list_add(&device->dev_list, &fs_devices->devices);
513 device->fs_devices = fs_devices;
514 fs_devices->num_devices++;
515 }
516 return fs_devices;
517 error:
518 free_fs_devices(fs_devices);
519 return ERR_PTR(-ENOMEM);
520 }
521
522 void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
523 struct btrfs_fs_devices *fs_devices, int step)
524 {
525 struct btrfs_device *device, *next;
526
527 struct block_device *latest_bdev = NULL;
528 u64 latest_devid = 0;
529 u64 latest_transid = 0;
530
531 mutex_lock(&uuid_mutex);
532 again:
533 /* This is the initialized path, so it is safe to release the devices. */
534 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
535 if (device->in_fs_metadata) {
536 if (!device->is_tgtdev_for_dev_replace &&
537 (!latest_transid ||
538 device->generation > latest_transid)) {
539 latest_devid = device->devid;
540 latest_transid = device->generation;
541 latest_bdev = device->bdev;
542 }
543 continue;
544 }
545
546 if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
547 /*
548 * In the first step, keep the device which has
549 * the correct fsid and the devid that is used
550 * for the dev_replace procedure.
551 * In the second step, the dev_replace state is
552 * read from the device tree and it is known
553 * whether the procedure is really active or
554 * not, which means whether this device is
555 * used or whether it should be removed.
556 */
557 if (step == 0 || device->is_tgtdev_for_dev_replace) {
558 continue;
559 }
560 }
561 if (device->bdev) {
562 blkdev_put(device->bdev, device->mode);
563 device->bdev = NULL;
564 fs_devices->open_devices--;
565 }
566 if (device->writeable) {
567 list_del_init(&device->dev_alloc_list);
568 device->writeable = 0;
569 if (!device->is_tgtdev_for_dev_replace)
570 fs_devices->rw_devices--;
571 }
572 list_del_init(&device->dev_list);
573 fs_devices->num_devices--;
574 rcu_string_free(device->name);
575 kfree(device);
576 }
577
578 if (fs_devices->seed) {
579 fs_devices = fs_devices->seed;
580 goto again;
581 }
582
583 fs_devices->latest_bdev = latest_bdev;
584 fs_devices->latest_devid = latest_devid;
585 fs_devices->latest_trans = latest_transid;
586
587 mutex_unlock(&uuid_mutex);
588 }
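/*
 * The "step" argument above distinguishes two passes: with step == 0
 * (before the dev_replace state has been read from the device tree) any
 * device with devid BTRFS_DEV_REPLACE_DEVID is kept; on the later pass
 * only a device actually flagged is_tgtdev_for_dev_replace survives,
 * and a leftover replace target is closed and freed like any other
 * stale device.
 */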
589
590 static void __free_device(struct work_struct *work)
591 {
592 struct btrfs_device *device;
593
594 device = container_of(work, struct btrfs_device, rcu_work);
595
596 if (device->bdev)
597 blkdev_put(device->bdev, device->mode);
598
599 rcu_string_free(device->name);
600 kfree(device);
601 }
602
603 static void free_device(struct rcu_head *head)
604 {
605 struct btrfs_device *device;
606
607 device = container_of(head, struct btrfs_device, rcu);
608
609 INIT_WORK(&device->rcu_work, __free_device);
610 schedule_work(&device->rcu_work);
611 }
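/*
 * Freeing a device is a two-stage affair: call_rcu() runs free_device()
 * after a grace period, and free_device() defers the real teardown to a
 * workqueue because blkdev_put() may sleep, which RCU callback (softirq)
 * context does not allow:
 *
 *	call_rcu(&device->rcu, free_device)
 *	  -> free_device()              atomic context
 *	       -> schedule_work()
 *	            -> __free_device()  may sleep: blkdev_put(), kfree()
 */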
612
613 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
614 {
615 struct btrfs_device *device;
616
617 if (--fs_devices->opened > 0)
618 return 0;
619
620 mutex_lock(&fs_devices->device_list_mutex);
621 list_for_each_entry(device, &fs_devices->devices, dev_list) {
622 struct btrfs_device *new_device;
623 struct rcu_string *name;
624
625 if (device->bdev)
626 fs_devices->open_devices--;
627
628 if (device->writeable && !device->is_tgtdev_for_dev_replace) {
629 list_del_init(&device->dev_alloc_list);
630 fs_devices->rw_devices--;
631 }
632
633 if (device->can_discard)
634 fs_devices->num_can_discard--;
635
636 new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
637 BUG_ON(!new_device); /* -ENOMEM */
638 memcpy(new_device, device, sizeof(*new_device));
639
640 /* Safe because we are under uuid_mutex */
641 if (device->name) {
642 name = rcu_string_strdup(device->name->str, GFP_NOFS);
643 BUG_ON(device->name && !name); /* -ENOMEM */
644 rcu_assign_pointer(new_device->name, name);
645 }
646 new_device->bdev = NULL;
647 new_device->writeable = 0;
648 new_device->in_fs_metadata = 0;
649 new_device->can_discard = 0;
650 list_replace_rcu(&device->dev_list, &new_device->dev_list);
651
652 call_rcu(&device->rcu, free_device);
653 }
654 mutex_unlock(&fs_devices->device_list_mutex);
655
656 WARN_ON(fs_devices->open_devices);
657 WARN_ON(fs_devices->rw_devices);
658 fs_devices->opened = 0;
659 fs_devices->seeding = 0;
660
661 return 0;
662 }
663
664 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
665 {
666 struct btrfs_fs_devices *seed_devices = NULL;
667 int ret;
668
669 mutex_lock(&uuid_mutex);
670 ret = __btrfs_close_devices(fs_devices);
671 if (!fs_devices->opened) {
672 seed_devices = fs_devices->seed;
673 fs_devices->seed = NULL;
674 }
675 mutex_unlock(&uuid_mutex);
676
677 while (seed_devices) {
678 fs_devices = seed_devices;
679 seed_devices = fs_devices->seed;
680 __btrfs_close_devices(fs_devices);
681 free_fs_devices(fs_devices);
682 }
683 return ret;
684 }
685
686 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
687 fmode_t flags, void *holder)
688 {
689 struct request_queue *q;
690 struct block_device *bdev;
691 struct list_head *head = &fs_devices->devices;
692 struct btrfs_device *device;
693 struct block_device *latest_bdev = NULL;
694 struct buffer_head *bh;
695 struct btrfs_super_block *disk_super;
696 u64 latest_devid = 0;
697 u64 latest_transid = 0;
698 u64 devid;
699 int seeding = 1;
700 int ret = 0;
701
702 flags |= FMODE_EXCL;
703
704 list_for_each_entry(device, head, dev_list) {
705 if (device->bdev)
706 continue;
707 if (!device->name)
708 continue;
709
710 ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
711 &bdev, &bh);
712 if (ret)
713 continue;
714
715 disk_super = (struct btrfs_super_block *)bh->b_data;
716 devid = btrfs_stack_device_id(&disk_super->dev_item);
717 if (devid != device->devid)
718 goto error_brelse;
719
720 if (memcmp(device->uuid, disk_super->dev_item.uuid,
721 BTRFS_UUID_SIZE))
722 goto error_brelse;
723
724 device->generation = btrfs_super_generation(disk_super);
725 if (!latest_transid || device->generation > latest_transid) {
726 latest_devid = devid;
727 latest_transid = device->generation;
728 latest_bdev = bdev;
729 }
730
731 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
732 device->writeable = 0;
733 } else {
734 device->writeable = !bdev_read_only(bdev);
735 seeding = 0;
736 }
737
738 q = bdev_get_queue(bdev);
739 if (blk_queue_discard(q)) {
740 device->can_discard = 1;
741 fs_devices->num_can_discard++;
742 }
743
744 device->bdev = bdev;
745 device->in_fs_metadata = 0;
746 device->mode = flags;
747
748 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
749 fs_devices->rotating = 1;
750
751 fs_devices->open_devices++;
752 if (device->writeable && !device->is_tgtdev_for_dev_replace) {
753 fs_devices->rw_devices++;
754 list_add(&device->dev_alloc_list,
755 &fs_devices->alloc_list);
756 }
757 brelse(bh);
758 continue;
759
760 error_brelse:
761 brelse(bh);
762 blkdev_put(bdev, flags);
763 continue;
764 }
765 if (fs_devices->open_devices == 0) {
766 ret = -EINVAL;
767 goto out;
768 }
769 fs_devices->seeding = seeding;
770 fs_devices->opened = 1;
771 fs_devices->latest_bdev = latest_bdev;
772 fs_devices->latest_devid = latest_devid;
773 fs_devices->latest_trans = latest_transid;
774 fs_devices->total_rw_bytes = 0;
775 out:
776 return ret;
777 }
778
779 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
780 fmode_t flags, void *holder)
781 {
782 int ret;
783
784 mutex_lock(&uuid_mutex);
785 if (fs_devices->opened) {
786 fs_devices->opened++;
787 ret = 0;
788 } else {
789 ret = __btrfs_open_devices(fs_devices, flags, holder);
790 }
791 mutex_unlock(&uuid_mutex);
792 return ret;
793 }
794
795 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
796 struct btrfs_fs_devices **fs_devices_ret)
797 {
798 struct btrfs_super_block *disk_super;
799 struct block_device *bdev;
800 struct buffer_head *bh;
801 int ret;
802 u64 devid;
803 u64 transid;
804 u64 total_devices;
805
806 flags |= FMODE_EXCL;
807 mutex_lock(&uuid_mutex);
808 ret = btrfs_get_bdev_and_sb(path, flags, holder, 0, &bdev, &bh);
809 if (ret)
810 goto error;
811 disk_super = (struct btrfs_super_block *)bh->b_data;
812 devid = btrfs_stack_device_id(&disk_super->dev_item);
813 transid = btrfs_super_generation(disk_super);
814 total_devices = btrfs_super_num_devices(disk_super);
815 if (disk_super->label[0]) {
816 if (disk_super->label[BTRFS_LABEL_SIZE - 1])
817 disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
818 printk(KERN_INFO "device label %s ", disk_super->label);
819 } else {
820 printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
821 }
822 printk(KERN_CONT "devid %llu transid %llu %s\n",
823 (unsigned long long)devid, (unsigned long long)transid, path);
824 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
825 if (!ret && fs_devices_ret)
826 (*fs_devices_ret)->total_devices = total_devices;
827 brelse(bh);
828 blkdev_put(bdev, flags);
829 error:
830 mutex_unlock(&uuid_mutex);
831 return ret;
832 }
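/*
 * The printks above emit one line per scanned device, e.g. with
 * hypothetical values:
 *
 *	device label data devid 1 transid 4912 /dev/sdb
 *	device fsid f1e2d3c4-... devid 2 transid 4912 /dev/sdc
 */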
833
834 /* helper to account the used device space in the range */
835 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
836 u64 end, u64 *length)
837 {
838 struct btrfs_key key;
839 struct btrfs_root *root = device->dev_root;
840 struct btrfs_dev_extent *dev_extent;
841 struct btrfs_path *path;
842 u64 extent_end;
843 int ret;
844 int slot;
845 struct extent_buffer *l;
846
847 *length = 0;
848
849 if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
850 return 0;
851
852 path = btrfs_alloc_path();
853 if (!path)
854 return -ENOMEM;
855 path->reada = 2;
856
857 key.objectid = device->devid;
858 key.offset = start;
859 key.type = BTRFS_DEV_EXTENT_KEY;
860
861 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
862 if (ret < 0)
863 goto out;
864 if (ret > 0) {
865 ret = btrfs_previous_item(root, path, key.objectid, key.type);
866 if (ret < 0)
867 goto out;
868 }
869
870 while (1) {
871 l = path->nodes[0];
872 slot = path->slots[0];
873 if (slot >= btrfs_header_nritems(l)) {
874 ret = btrfs_next_leaf(root, path);
875 if (ret == 0)
876 continue;
877 if (ret < 0)
878 goto out;
879
880 break;
881 }
882 btrfs_item_key_to_cpu(l, &key, slot);
883
884 if (key.objectid < device->devid)
885 goto next;
886
887 if (key.objectid > device->devid)
888 break;
889
890 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
891 goto next;
892
893 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
894 extent_end = key.offset + btrfs_dev_extent_length(l,
895 dev_extent);
896 if (key.offset <= start && extent_end > end) {
897 *length = end - start + 1;
898 break;
899 } else if (key.offset <= start && extent_end > start)
900 *length += extent_end - start;
901 else if (key.offset > start && extent_end <= end)
902 *length += extent_end - key.offset;
903 else if (key.offset > start && key.offset <= end) {
904 *length += end - key.offset + 1;
905 break;
906 } else if (key.offset > end)
907 break;
908
909 next:
910 path->slots[0]++;
911 }
912 ret = 0;
913 out:
914 btrfs_free_path(path);
915 return ret;
916 }
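/*
 * The four overlap cases above, for a dev extent [key.offset, extent_end)
 * checked against the inclusive query range [start, end], with
 * hypothetical numbers start = 100, end = 199:
 *
 *	extent  50..250  covers the whole range -> *length = 100, stop
 *	extent  50..150  overlaps the head      -> *length += 50
 *	extent 120..180  lies fully inside      -> *length += 60
 *	extent 150..250  overlaps the tail      -> *length += 50, stop
 */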
917
918 /*
919 * find_free_dev_extent - find free space in the specified device
920 * @device: the device which we search the free space in
921 * @num_bytes: the size of the free space that we need
922 * @start: store the start of the free space.
923 * @len: the size of the free space that we find, or the size of the max
924 * free space if we don't find suitable free space
925 *
926 * this uses a pretty simple search, the expectation is that it is
927 * called very infrequently and that a given device has a small number
928 * of extents
929 *
930 * @start is used to store the start of the free space if we find it. But if we
931 * don't find suitable free space, it will be used to store the start position
932 * of the max free space.
933 *
934 * @len is used to store the size of the free space that we find.
935 * But if we don't find suitable free space, it is used to store the size of
936 * the max free space.
937 */
938 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
939 u64 *start, u64 *len)
940 {
941 struct btrfs_key key;
942 struct btrfs_root *root = device->dev_root;
943 struct btrfs_dev_extent *dev_extent;
944 struct btrfs_path *path;
945 u64 hole_size;
946 u64 max_hole_start;
947 u64 max_hole_size;
948 u64 extent_end;
949 u64 search_start;
950 u64 search_end = device->total_bytes;
951 int ret;
952 int slot;
953 struct extent_buffer *l;
954
955 /* FIXME use last free of some kind */
956
957 /* we don't want to overwrite the superblock on the drive,
958 * so we make sure to start at an offset of at least 1MB
959 */
960 search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
961
962 max_hole_start = search_start;
963 max_hole_size = 0;
964 hole_size = 0;
965
966 if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
967 ret = -ENOSPC;
968 goto error;
969 }
970
971 path = btrfs_alloc_path();
972 if (!path) {
973 ret = -ENOMEM;
974 goto error;
975 }
976 path->reada = 2;
977
978 key.objectid = device->devid;
979 key.offset = search_start;
980 key.type = BTRFS_DEV_EXTENT_KEY;
981
982 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
983 if (ret < 0)
984 goto out;
985 if (ret > 0) {
986 ret = btrfs_previous_item(root, path, key.objectid, key.type);
987 if (ret < 0)
988 goto out;
989 }
990
991 while (1) {
992 l = path->nodes[0];
993 slot = path->slots[0];
994 if (slot >= btrfs_header_nritems(l)) {
995 ret = btrfs_next_leaf(root, path);
996 if (ret == 0)
997 continue;
998 if (ret < 0)
999 goto out;
1000
1001 break;
1002 }
1003 btrfs_item_key_to_cpu(l, &key, slot);
1004
1005 if (key.objectid < device->devid)
1006 goto next;
1007
1008 if (key.objectid > device->devid)
1009 break;
1010
1011 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
1012 goto next;
1013
1014 if (key.offset > search_start) {
1015 hole_size = key.offset - search_start;
1016
1017 if (hole_size > max_hole_size) {
1018 max_hole_start = search_start;
1019 max_hole_size = hole_size;
1020 }
1021
1022 /*
1023 * If this free space is greater than what we need,
1024 * it must be the max free space that we have found
1025 * until now, so max_hole_start must point to the start
1026 * of this free space and the length of this free space
1027 * is stored in max_hole_size. Thus, we return
1028 * max_hole_start and max_hole_size and go back to the
1029 * caller.
1030 */
1031 if (hole_size >= num_bytes) {
1032 ret = 0;
1033 goto out;
1034 }
1035 }
1036
1037 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1038 extent_end = key.offset + btrfs_dev_extent_length(l,
1039 dev_extent);
1040 if (extent_end > search_start)
1041 search_start = extent_end;
1042 next:
1043 path->slots[0]++;
1044 cond_resched();
1045 }
1046
1047 /*
1048 * At this point, search_start should be the end of
1049 * allocated dev extents, and when shrinking the device,
1050 * search_end may be smaller than search_start.
1051 */
1052 if (search_end > search_start)
1053 hole_size = search_end - search_start;
1054
1055 if (hole_size > max_hole_size) {
1056 max_hole_start = search_start;
1057 max_hole_size = hole_size;
1058 }
1059
1060 /* See above. */
1061 if (hole_size < num_bytes)
1062 ret = -ENOSPC;
1063 else
1064 ret = 0;
1065
1066 out:
1067 btrfs_free_path(path);
1068 error:
1069 *start = max_hole_start;
1070 if (len)
1071 *len = max_hole_size;
1072 return ret;
1073 }
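/*
 * Worked example for find_free_dev_extent(), assuming a hypothetical
 * device with dev extents at [1M, 5M) and [9M, 12M) and num_bytes = 2M:
 * search_start begins at 1M (superblock protection), the first extent
 * pushes it to 5M, and the 4M gap before the second extent satisfies
 * the request, so the function returns 0 with *start = 5M, *len = 4M.
 */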
1074
1075 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1076 struct btrfs_device *device,
1077 u64 start)
1078 {
1079 int ret;
1080 struct btrfs_path *path;
1081 struct btrfs_root *root = device->dev_root;
1082 struct btrfs_key key;
1083 struct btrfs_key found_key;
1084 struct extent_buffer *leaf = NULL;
1085 struct btrfs_dev_extent *extent = NULL;
1086
1087 path = btrfs_alloc_path();
1088 if (!path)
1089 return -ENOMEM;
1090
1091 key.objectid = device->devid;
1092 key.offset = start;
1093 key.type = BTRFS_DEV_EXTENT_KEY;
1094 again:
1095 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1096 if (ret > 0) {
1097 ret = btrfs_previous_item(root, path, key.objectid,
1098 BTRFS_DEV_EXTENT_KEY);
1099 if (ret)
1100 goto out;
1101 leaf = path->nodes[0];
1102 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1103 extent = btrfs_item_ptr(leaf, path->slots[0],
1104 struct btrfs_dev_extent);
1105 BUG_ON(found_key.offset > start || found_key.offset +
1106 btrfs_dev_extent_length(leaf, extent) < start);
1107 key = found_key;
1108 btrfs_release_path(path);
1109 goto again;
1110 } else if (ret == 0) {
1111 leaf = path->nodes[0];
1112 extent = btrfs_item_ptr(leaf, path->slots[0],
1113 struct btrfs_dev_extent);
1114 } else {
1115 btrfs_error(root->fs_info, ret, "Slot search failed");
1116 goto out;
1117 }
1118
1119 if (device->bytes_used > 0) {
1120 u64 len = btrfs_dev_extent_length(leaf, extent);
1121 device->bytes_used -= len;
1122 spin_lock(&root->fs_info->free_chunk_lock);
1123 root->fs_info->free_chunk_space += len;
1124 spin_unlock(&root->fs_info->free_chunk_lock);
1125 }
1126 ret = btrfs_del_item(trans, root, path);
1127 if (ret) {
1128 btrfs_error(root->fs_info, ret,
1129 "Failed to remove dev extent item");
1130 }
1131 out:
1132 btrfs_free_path(path);
1133 return ret;
1134 }
1135
1136 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1137 struct btrfs_device *device,
1138 u64 chunk_tree, u64 chunk_objectid,
1139 u64 chunk_offset, u64 start, u64 num_bytes)
1140 {
1141 int ret;
1142 struct btrfs_path *path;
1143 struct btrfs_root *root = device->dev_root;
1144 struct btrfs_dev_extent *extent;
1145 struct extent_buffer *leaf;
1146 struct btrfs_key key;
1147
1148 WARN_ON(!device->in_fs_metadata);
1149 WARN_ON(device->is_tgtdev_for_dev_replace);
1150 path = btrfs_alloc_path();
1151 if (!path)
1152 return -ENOMEM;
1153
1154 key.objectid = device->devid;
1155 key.offset = start;
1156 key.type = BTRFS_DEV_EXTENT_KEY;
1157 ret = btrfs_insert_empty_item(trans, root, path, &key,
1158 sizeof(*extent));
1159 if (ret)
1160 goto out;
1161
1162 leaf = path->nodes[0];
1163 extent = btrfs_item_ptr(leaf, path->slots[0],
1164 struct btrfs_dev_extent);
1165 btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1166 btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1167 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1168
1169 write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1170 (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
1171 BTRFS_UUID_SIZE);
1172
1173 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1174 btrfs_mark_buffer_dirty(leaf);
1175 out:
1176 btrfs_free_path(path);
1177 return ret;
1178 }
1179
1180 static noinline int find_next_chunk(struct btrfs_root *root,
1181 u64 objectid, u64 *offset)
1182 {
1183 struct btrfs_path *path;
1184 int ret;
1185 struct btrfs_key key;
1186 struct btrfs_chunk *chunk;
1187 struct btrfs_key found_key;
1188
1189 path = btrfs_alloc_path();
1190 if (!path)
1191 return -ENOMEM;
1192
1193 key.objectid = objectid;
1194 key.offset = (u64)-1;
1195 key.type = BTRFS_CHUNK_ITEM_KEY;
1196
1197 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1198 if (ret < 0)
1199 goto error;
1200
1201 BUG_ON(ret == 0); /* Corruption */
1202
1203 ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
1204 if (ret) {
1205 *offset = 0;
1206 } else {
1207 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1208 path->slots[0]);
1209 if (found_key.objectid != objectid)
1210 *offset = 0;
1211 else {
1212 chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
1213 struct btrfs_chunk);
1214 *offset = found_key.offset +
1215 btrfs_chunk_length(path->nodes[0], chunk);
1216 }
1217 }
1218 ret = 0;
1219 error:
1220 btrfs_free_path(path);
1221 return ret;
1222 }
1223
1224 static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
1225 {
1226 int ret;
1227 struct btrfs_key key;
1228 struct btrfs_key found_key;
1229 struct btrfs_path *path;
1230
1231 root = root->fs_info->chunk_root;
1232
1233 path = btrfs_alloc_path();
1234 if (!path)
1235 return -ENOMEM;
1236
1237 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1238 key.type = BTRFS_DEV_ITEM_KEY;
1239 key.offset = (u64)-1;
1240
1241 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1242 if (ret < 0)
1243 goto error;
1244
1245 BUG_ON(ret == 0); /* Corruption */
1246
1247 ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
1248 BTRFS_DEV_ITEM_KEY);
1249 if (ret) {
1250 *objectid = 1;
1251 } else {
1252 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1253 path->slots[0]);
1254 *objectid = found_key.offset + 1;
1255 }
1256 ret = 0;
1257 error:
1258 btrfs_free_path(path);
1259 return ret;
1260 }
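/*
 * Example: if the highest existing dev item key in the chunk tree is
 * (BTRFS_DEV_ITEMS_OBJECTID, BTRFS_DEV_ITEM_KEY, 3), the next device
 * is assigned devid 4; if no dev item exists yet, devid 1 is handed out.
 */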
1261
1262 /*
1263 * the device information is stored in the chunk root;
1264 * the btrfs_device struct should be fully filled in
1265 */
1266 int btrfs_add_device(struct btrfs_trans_handle *trans,
1267 struct btrfs_root *root,
1268 struct btrfs_device *device)
1269 {
1270 int ret;
1271 struct btrfs_path *path;
1272 struct btrfs_dev_item *dev_item;
1273 struct extent_buffer *leaf;
1274 struct btrfs_key key;
1275 unsigned long ptr;
1276
1277 root = root->fs_info->chunk_root;
1278
1279 path = btrfs_alloc_path();
1280 if (!path)
1281 return -ENOMEM;
1282
1283 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1284 key.type = BTRFS_DEV_ITEM_KEY;
1285 key.offset = device->devid;
1286
1287 ret = btrfs_insert_empty_item(trans, root, path, &key,
1288 sizeof(*dev_item));
1289 if (ret)
1290 goto out;
1291
1292 leaf = path->nodes[0];
1293 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1294
1295 btrfs_set_device_id(leaf, dev_item, device->devid);
1296 btrfs_set_device_generation(leaf, dev_item, 0);
1297 btrfs_set_device_type(leaf, dev_item, device->type);
1298 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1299 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1300 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1301 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1302 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1303 btrfs_set_device_group(leaf, dev_item, 0);
1304 btrfs_set_device_seek_speed(leaf, dev_item, 0);
1305 btrfs_set_device_bandwidth(leaf, dev_item, 0);
1306 btrfs_set_device_start_offset(leaf, dev_item, 0);
1307
1308 ptr = (unsigned long)btrfs_device_uuid(dev_item);
1309 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1310 ptr = (unsigned long)btrfs_device_fsid(dev_item);
1311 write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1312 btrfs_mark_buffer_dirty(leaf);
1313
1314 ret = 0;
1315 out:
1316 btrfs_free_path(path);
1317 return ret;
1318 }
1319
1320 static int btrfs_rm_dev_item(struct btrfs_root *root,
1321 struct btrfs_device *device)
1322 {
1323 int ret;
1324 struct btrfs_path *path;
1325 struct btrfs_key key;
1326 struct btrfs_trans_handle *trans;
1327
1328 root = root->fs_info->chunk_root;
1329
1330 path = btrfs_alloc_path();
1331 if (!path)
1332 return -ENOMEM;
1333
1334 trans = btrfs_start_transaction(root, 0);
1335 if (IS_ERR(trans)) {
1336 btrfs_free_path(path);
1337 return PTR_ERR(trans);
1338 }
1339 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1340 key.type = BTRFS_DEV_ITEM_KEY;
1341 key.offset = device->devid;
1342 lock_chunks(root);
1343
1344 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1345 if (ret < 0)
1346 goto out;
1347
1348 if (ret > 0) {
1349 ret = -ENOENT;
1350 goto out;
1351 }
1352
1353 ret = btrfs_del_item(trans, root, path);
1354 if (ret)
1355 goto out;
1356 out:
1357 btrfs_free_path(path);
1358 unlock_chunks(root);
1359 btrfs_commit_transaction(trans, root);
1360 return ret;
1361 }
1362
1363 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1364 {
1365 struct btrfs_device *device;
1366 struct btrfs_device *next_device;
1367 struct block_device *bdev;
1368 struct buffer_head *bh = NULL;
1369 struct btrfs_super_block *disk_super;
1370 struct btrfs_fs_devices *cur_devices;
1371 u64 all_avail;
1372 u64 devid;
1373 u64 num_devices;
1374 u8 *dev_uuid;
1375 int ret = 0;
1376 bool clear_super = false;
1377
1378 mutex_lock(&uuid_mutex);
1379
1380 all_avail = root->fs_info->avail_data_alloc_bits |
1381 root->fs_info->avail_system_alloc_bits |
1382 root->fs_info->avail_metadata_alloc_bits;
1383
1384 num_devices = root->fs_info->fs_devices->num_devices;
1385 btrfs_dev_replace_lock(&root->fs_info->dev_replace);
1386 if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
1387 WARN_ON(num_devices < 1);
1388 num_devices--;
1389 }
1390 btrfs_dev_replace_unlock(&root->fs_info->dev_replace);
1391
1392 if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
1393 printk(KERN_ERR "btrfs: unable to go below four devices "
1394 "on raid10\n");
1395 ret = -EINVAL;
1396 goto out;
1397 }
1398
1399 if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
1400 printk(KERN_ERR "btrfs: unable to go below two "
1401 "devices on raid1\n");
1402 ret = -EINVAL;
1403 goto out;
1404 }
1405
1406 if (strcmp(device_path, "missing") == 0) {
1407 struct list_head *devices;
1408 struct btrfs_device *tmp;
1409
1410 device = NULL;
1411 devices = &root->fs_info->fs_devices->devices;
1412 /*
1413 * It is safe to read the devices since the volume_mutex
1414 * is held.
1415 */
1416 list_for_each_entry(tmp, devices, dev_list) {
1417 if (tmp->in_fs_metadata &&
1418 !tmp->is_tgtdev_for_dev_replace &&
1419 !tmp->bdev) {
1420 device = tmp;
1421 break;
1422 }
1423 }
1424 bdev = NULL;
1425 bh = NULL;
1426 disk_super = NULL;
1427 if (!device) {
1428 printk(KERN_ERR "btrfs: no missing devices found to "
1429 "remove\n");
1430 goto out;
1431 }
1432 } else {
1433 ret = btrfs_get_bdev_and_sb(device_path,
1434 FMODE_READ | FMODE_EXCL,
1435 root->fs_info->bdev_holder, 0,
1436 &bdev, &bh);
1437 if (ret)
1438 goto out;
1439 disk_super = (struct btrfs_super_block *)bh->b_data;
1440 devid = btrfs_stack_device_id(&disk_super->dev_item);
1441 dev_uuid = disk_super->dev_item.uuid;
1442 device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1443 disk_super->fsid);
1444 if (!device) {
1445 ret = -ENOENT;
1446 goto error_brelse;
1447 }
1448 }
1449
1450 if (device->is_tgtdev_for_dev_replace) {
1451 pr_err("btrfs: unable to remove the dev_replace target dev\n");
1452 ret = -EINVAL;
1453 goto error_brelse;
1454 }
1455
1456 if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1457 printk(KERN_ERR "btrfs: unable to remove the only writeable "
1458 "device\n");
1459 ret = -EINVAL;
1460 goto error_brelse;
1461 }
1462
1463 if (device->writeable) {
1464 lock_chunks(root);
1465 list_del_init(&device->dev_alloc_list);
1466 unlock_chunks(root);
1467 root->fs_info->fs_devices->rw_devices--;
1468 clear_super = true;
1469 }
1470
1471 ret = btrfs_shrink_device(device, 0);
1472 if (ret)
1473 goto error_undo;
1474
1475 /*
1476 * TODO: the superblock still includes this device in its num_devices
1477 * counter although write_all_supers() is not locked out. This
1478 * could give a filesystem state which requires a degraded mount.
1479 */
1480 ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1481 if (ret)
1482 goto error_undo;
1483
1484 spin_lock(&root->fs_info->free_chunk_lock);
1485 root->fs_info->free_chunk_space -= device->total_bytes -
1486 device->bytes_used;
1487 spin_unlock(&root->fs_info->free_chunk_lock);
1488
1489 device->in_fs_metadata = 0;
1490 btrfs_scrub_cancel_dev(root->fs_info, device);
1491
1492 /*
1493 * the device list mutex makes sure that we don't change
1494 * the device list while someone else is writing out all
1495 * the device supers.
1496 */
1497
1498 cur_devices = device->fs_devices;
1499 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1500 list_del_rcu(&device->dev_list);
1501
1502 device->fs_devices->num_devices--;
1503 device->fs_devices->total_devices--;
1504
1505 if (device->missing)
1506 root->fs_info->fs_devices->missing_devices--;
1507
1508 next_device = list_entry(root->fs_info->fs_devices->devices.next,
1509 struct btrfs_device, dev_list);
1510 if (device->bdev == root->fs_info->sb->s_bdev)
1511 root->fs_info->sb->s_bdev = next_device->bdev;
1512 if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1513 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1514
1515 if (device->bdev)
1516 device->fs_devices->open_devices--;
1517
1518 call_rcu(&device->rcu, free_device);
1519 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1520
1521 num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1522 btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1523
1524 if (cur_devices->open_devices == 0) {
1525 struct btrfs_fs_devices *fs_devices;
1526 fs_devices = root->fs_info->fs_devices;
1527 while (fs_devices) {
1528 if (fs_devices->seed == cur_devices)
1529 break;
1530 fs_devices = fs_devices->seed;
1531 }
1532 fs_devices->seed = cur_devices->seed;
1533 cur_devices->seed = NULL;
1534 lock_chunks(root);
1535 __btrfs_close_devices(cur_devices);
1536 unlock_chunks(root);
1537 free_fs_devices(cur_devices);
1538 }
1539
1540 root->fs_info->num_tolerated_disk_barrier_failures =
1541 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1542
1543 /*
1544 * at this point, the device is zero sized. We want to
1545 * remove it from the devices list and zero out the old super
1546 */
1547 if (clear_super && disk_super) {
1548 /* make sure this device isn't detected as part of
1549 * the FS anymore
1550 */
1551 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1552 set_buffer_dirty(bh);
1553 sync_dirty_buffer(bh);
1554 }
1555
1556 ret = 0;
1557
1558 /* Notify udev that device has changed */
1559 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1560
1561 error_brelse:
1562 brelse(bh);
1563 if (bdev)
1564 blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1565 out:
1566 mutex_unlock(&uuid_mutex);
1567 return ret;
1568 error_undo:
1569 if (device->writeable) {
1570 lock_chunks(root);
1571 list_add(&device->dev_alloc_list,
1572 &root->fs_info->fs_devices->alloc_list);
1573 unlock_chunks(root);
1574 root->fs_info->fs_devices->rw_devices++;
1575 }
1576 goto error_brelse;
1577 }
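/*
 * Without the KOBJ_CHANGE uevent sent on the success path above, udev
 * keeps stale probe data and may still report the removed device as
 * btrfs; the "change" event makes it re-run blkid on the wiped device.
 * It is visible to userspace, e.g. via "udevadm monitor" (illustrative):
 *
 *	KERNEL[...] change   /devices/.../block/sdb (block)
 */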
1578
1579 void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
1580 struct btrfs_device *srcdev)
1581 {
1582 WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1583 list_del_rcu(&srcdev->dev_list);
1584 list_del_rcu(&srcdev->dev_alloc_list);
1585 fs_info->fs_devices->num_devices--;
1586 if (srcdev->missing) {
1587 fs_info->fs_devices->missing_devices--;
1588 fs_info->fs_devices->rw_devices++;
1589 }
1590 if (srcdev->can_discard)
1591 fs_info->fs_devices->num_can_discard--;
1592 if (srcdev->bdev)
1593 fs_info->fs_devices->open_devices--;
1594
1595 call_rcu(&srcdev->rcu, free_device);
1596 }
1597
1598 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
1599 struct btrfs_device *tgtdev)
1600 {
1601 struct btrfs_device *next_device;
1602
1603 WARN_ON(!tgtdev);
1604 mutex_lock(&fs_info->fs_devices->device_list_mutex);
1605 if (tgtdev->bdev) {
1606 btrfs_scratch_superblock(tgtdev);
1607 fs_info->fs_devices->open_devices--;
1608 }
1609 fs_info->fs_devices->num_devices--;
1610 if (tgtdev->can_discard)
1611 fs_info->fs_devices->num_can_discard--;
1612
1613 next_device = list_entry(fs_info->fs_devices->devices.next,
1614 struct btrfs_device, dev_list);
1615 if (tgtdev->bdev == fs_info->sb->s_bdev)
1616 fs_info->sb->s_bdev = next_device->bdev;
1617 if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
1618 fs_info->fs_devices->latest_bdev = next_device->bdev;
1619 list_del_rcu(&tgtdev->dev_list);
1620
1621 call_rcu(&tgtdev->rcu, free_device);
1622
1623 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1624 }
1625
1626 int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
1627 struct btrfs_device **device)
1628 {
1629 int ret = 0;
1630 struct btrfs_super_block *disk_super;
1631 u64 devid;
1632 u8 *dev_uuid;
1633 struct block_device *bdev;
1634 struct buffer_head *bh;
1635
1636 *device = NULL;
1637 ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
1638 root->fs_info->bdev_holder, 0, &bdev, &bh);
1639 if (ret)
1640 return ret;
1641 disk_super = (struct btrfs_super_block *)bh->b_data;
1642 devid = btrfs_stack_device_id(&disk_super->dev_item);
1643 dev_uuid = disk_super->dev_item.uuid;
1644 *device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1645 disk_super->fsid);
1646 brelse(bh);
1647 if (!*device)
1648 ret = -ENOENT;
1649 blkdev_put(bdev, FMODE_READ);
1650 return ret;
1651 }
1652
1653 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
1654 char *device_path,
1655 struct btrfs_device **device)
1656 {
1657 *device = NULL;
1658 if (strcmp(device_path, "missing") == 0) {
1659 struct list_head *devices;
1660 struct btrfs_device *tmp;
1661
1662 devices = &root->fs_info->fs_devices->devices;
1663 /*
1664 * It is safe to read the devices since the volume_mutex
1665 * is held by the caller.
1666 */
1667 list_for_each_entry(tmp, devices, dev_list) {
1668 if (tmp->in_fs_metadata && !tmp->bdev) {
1669 *device = tmp;
1670 break;
1671 }
1672 }
1673
1674 if (!*device) {
1675 pr_err("btrfs: no missing device found\n");
1676 return -ENOENT;
1677 }
1678
1679 return 0;
1680 } else {
1681 return btrfs_find_device_by_path(root, device_path, device);
1682 }
1683 }
1684
1685 /*
1686 * does all the dirty work required for changing the file system's UUID.
1687 */
1688 static int btrfs_prepare_sprout(struct btrfs_root *root)
1689 {
1690 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1691 struct btrfs_fs_devices *old_devices;
1692 struct btrfs_fs_devices *seed_devices;
1693 struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1694 struct btrfs_device *device;
1695 u64 super_flags;
1696
1697 BUG_ON(!mutex_is_locked(&uuid_mutex));
1698 if (!fs_devices->seeding)
1699 return -EINVAL;
1700
1701 seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1702 if (!seed_devices)
1703 return -ENOMEM;
1704
1705 old_devices = clone_fs_devices(fs_devices);
1706 if (IS_ERR(old_devices)) {
1707 kfree(seed_devices);
1708 return PTR_ERR(old_devices);
1709 }
1710
1711 list_add(&old_devices->list, &fs_uuids);
1712
1713 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1714 seed_devices->opened = 1;
1715 INIT_LIST_HEAD(&seed_devices->devices);
1716 INIT_LIST_HEAD(&seed_devices->alloc_list);
1717 mutex_init(&seed_devices->device_list_mutex);
1718
1719 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1720 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1721 synchronize_rcu);
1722 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1723
1724 list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1725 list_for_each_entry(device, &seed_devices->devices, dev_list) {
1726 device->fs_devices = seed_devices;
1727 }
1728
1729 fs_devices->seeding = 0;
1730 fs_devices->num_devices = 0;
1731 fs_devices->open_devices = 0;
1732 fs_devices->total_devices = 0;
1733 fs_devices->seed = seed_devices;
1734
1735 generate_random_uuid(fs_devices->fsid);
1736 memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1737 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1738 super_flags = btrfs_super_flags(disk_super) &
1739 ~BTRFS_SUPER_FLAG_SEEDING;
1740 btrfs_set_super_flags(disk_super, super_flags);
1741
1742 return 0;
1743 }
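/*
 * Rough shape of the lists before and after btrfs_prepare_sprout()
 * (sketch): the devices move to a new seed structure while the mounted
 * fs_devices gets a fresh fsid, and a clone preserves the old fsid in
 * the global fs_uuids list:
 *
 *	before: fs_devices (old fsid) -> [devices]
 *	after:  fs_devices (new fsid) -> []  --seed-->  seed_devices -> [devices]
 *	        old_devices (old fsid, clones) registered on fs_uuids
 */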
1744
1745 /*
1746 * store the expected generation for seed devices in device items.
1747 */
1748 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1749 struct btrfs_root *root)
1750 {
1751 struct btrfs_path *path;
1752 struct extent_buffer *leaf;
1753 struct btrfs_dev_item *dev_item;
1754 struct btrfs_device *device;
1755 struct btrfs_key key;
1756 u8 fs_uuid[BTRFS_UUID_SIZE];
1757 u8 dev_uuid[BTRFS_UUID_SIZE];
1758 u64 devid;
1759 int ret;
1760
1761 path = btrfs_alloc_path();
1762 if (!path)
1763 return -ENOMEM;
1764
1765 root = root->fs_info->chunk_root;
1766 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1767 key.offset = 0;
1768 key.type = BTRFS_DEV_ITEM_KEY;
1769
1770 while (1) {
1771 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1772 if (ret < 0)
1773 goto error;
1774
1775 leaf = path->nodes[0];
1776 next_slot:
1777 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1778 ret = btrfs_next_leaf(root, path);
1779 if (ret > 0)
1780 break;
1781 if (ret < 0)
1782 goto error;
1783 leaf = path->nodes[0];
1784 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1785 btrfs_release_path(path);
1786 continue;
1787 }
1788
1789 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1790 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1791 key.type != BTRFS_DEV_ITEM_KEY)
1792 break;
1793
1794 dev_item = btrfs_item_ptr(leaf, path->slots[0],
1795 struct btrfs_dev_item);
1796 devid = btrfs_device_id(leaf, dev_item);
1797 read_extent_buffer(leaf, dev_uuid,
1798 (unsigned long)btrfs_device_uuid(dev_item),
1799 BTRFS_UUID_SIZE);
1800 read_extent_buffer(leaf, fs_uuid,
1801 (unsigned long)btrfs_device_fsid(dev_item),
1802 BTRFS_UUID_SIZE);
1803 device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1804 fs_uuid);
1805 BUG_ON(!device); /* Logic error */
1806
1807 if (device->fs_devices->seeding) {
1808 btrfs_set_device_generation(leaf, dev_item,
1809 device->generation);
1810 btrfs_mark_buffer_dirty(leaf);
1811 }
1812
1813 path->slots[0]++;
1814 goto next_slot;
1815 }
1816 ret = 0;
1817 error:
1818 btrfs_free_path(path);
1819 return ret;
1820 }
1821
1822 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1823 {
1824 struct request_queue *q;
1825 struct btrfs_trans_handle *trans;
1826 struct btrfs_device *device;
1827 struct block_device *bdev;
1828 struct list_head *devices;
1829 struct super_block *sb = root->fs_info->sb;
1830 struct rcu_string *name;
1831 u64 total_bytes;
1832 int seeding_dev = 0;
1833 int ret = 0;
1834
1835 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1836 return -EROFS;
1837
1838 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1839 root->fs_info->bdev_holder);
1840 if (IS_ERR(bdev))
1841 return PTR_ERR(bdev);
1842
1843 if (root->fs_info->fs_devices->seeding) {
1844 seeding_dev = 1;
1845 down_write(&sb->s_umount);
1846 mutex_lock(&uuid_mutex);
1847 }
1848
1849 filemap_write_and_wait(bdev->bd_inode->i_mapping);
1850
1851 devices = &root->fs_info->fs_devices->devices;
1852
1853 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1854 list_for_each_entry(device, devices, dev_list) {
1855 if (device->bdev == bdev) {
1856 ret = -EEXIST;
1857 mutex_unlock(
1858 &root->fs_info->fs_devices->device_list_mutex);
1859 goto error;
1860 }
1861 }
1862 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1863
1864 device = kzalloc(sizeof(*device), GFP_NOFS);
1865 if (!device) {
1866 /* we can safely leave the fs_devices entry around */
1867 ret = -ENOMEM;
1868 goto error;
1869 }
1870
1871 name = rcu_string_strdup(device_path, GFP_NOFS);
1872 if (!name) {
1873 kfree(device);
1874 ret = -ENOMEM;
1875 goto error;
1876 }
1877 rcu_assign_pointer(device->name, name);
1878
1879 ret = find_next_devid(root, &device->devid);
1880 if (ret) {
1881 rcu_string_free(device->name);
1882 kfree(device);
1883 goto error;
1884 }
1885
1886 trans = btrfs_start_transaction(root, 0);
1887 if (IS_ERR(trans)) {
1888 rcu_string_free(device->name);
1889 kfree(device);
1890 ret = PTR_ERR(trans);
1891 goto error;
1892 }
1893
1894 lock_chunks(root);
1895
1896 q = bdev_get_queue(bdev);
1897 if (blk_queue_discard(q))
1898 device->can_discard = 1;
1899 device->writeable = 1;
1900 device->work.func = pending_bios_fn;
1901 generate_random_uuid(device->uuid);
1902 spin_lock_init(&device->io_lock);
1903 device->generation = trans->transid;
1904 device->io_width = root->sectorsize;
1905 device->io_align = root->sectorsize;
1906 device->sector_size = root->sectorsize;
1907 device->total_bytes = i_size_read(bdev->bd_inode);
1908 device->disk_total_bytes = device->total_bytes;
1909 device->dev_root = root->fs_info->dev_root;
1910 device->bdev = bdev;
1911 device->in_fs_metadata = 1;
1912 device->is_tgtdev_for_dev_replace = 0;
1913 device->mode = FMODE_EXCL;
1914 set_blocksize(device->bdev, 4096);
1915
1916 if (seeding_dev) {
1917 sb->s_flags &= ~MS_RDONLY;
1918 ret = btrfs_prepare_sprout(root);
1919 BUG_ON(ret); /* -ENOMEM */
1920 }
1921
1922 device->fs_devices = root->fs_info->fs_devices;
1923
1924 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1925 list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
1926 list_add(&device->dev_alloc_list,
1927 &root->fs_info->fs_devices->alloc_list);
1928 root->fs_info->fs_devices->num_devices++;
1929 root->fs_info->fs_devices->open_devices++;
1930 root->fs_info->fs_devices->rw_devices++;
1931 root->fs_info->fs_devices->total_devices++;
1932 if (device->can_discard)
1933 root->fs_info->fs_devices->num_can_discard++;
1934 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
1935
1936 spin_lock(&root->fs_info->free_chunk_lock);
1937 root->fs_info->free_chunk_space += device->total_bytes;
1938 spin_unlock(&root->fs_info->free_chunk_lock);
1939
1940 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
1941 root->fs_info->fs_devices->rotating = 1;
1942
1943 total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
1944 btrfs_set_super_total_bytes(root->fs_info->super_copy,
1945 total_bytes + device->total_bytes);
1946
1947 total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
1948 btrfs_set_super_num_devices(root->fs_info->super_copy,
1949 total_bytes + 1);
1950 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1951
1952 if (seeding_dev) {
1953 ret = init_first_rw_device(trans, root, device);
1954 if (ret) {
1955 btrfs_abort_transaction(trans, root, ret);
1956 goto error_trans;
1957 }
1958 ret = btrfs_finish_sprout(trans, root);
1959 if (ret) {
1960 btrfs_abort_transaction(trans, root, ret);
1961 goto error_trans;
1962 }
1963 } else {
1964 ret = btrfs_add_device(trans, root, device);
1965 if (ret) {
1966 btrfs_abort_transaction(trans, root, ret);
1967 goto error_trans;
1968 }
1969 }
1970
1971 /*
1972 * we've got more storage, clear any full flags on the space
1973 * infos
1974 */
1975 btrfs_clear_space_info_full(root->fs_info);
1976
1977 unlock_chunks(root);
1978 root->fs_info->num_tolerated_disk_barrier_failures =
1979 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1980 ret = btrfs_commit_transaction(trans, root);
1981
1982 if (seeding_dev) {
1983 mutex_unlock(&uuid_mutex);
1984 up_write(&sb->s_umount);
1985
1986 if (ret) /* transaction commit */
1987 return ret;
1988
1989 ret = btrfs_relocate_sys_chunks(root);
1990 if (ret < 0)
1991 btrfs_error(root->fs_info, ret,
1992 "Failed to relocate sys chunks after "
1993 "device initialization. This can be fixed "
1994 "using the \"btrfs balance\" command.");
1995 trans = btrfs_attach_transaction(root);
1996 if (IS_ERR(trans)) {
1997 if (PTR_ERR(trans) == -ENOENT)
1998 return 0;
1999 return PTR_ERR(trans);
2000 }
2001 ret = btrfs_commit_transaction(trans, root);
2002 }
2003
2004 return ret;
2005
2006 error_trans:
2007 unlock_chunks(root);
2008 btrfs_end_transaction(trans, root);
2009 rcu_string_free(device->name);
2010 kfree(device);
2011 error:
2012 blkdev_put(bdev, FMODE_EXCL);
2013 if (seeding_dev) {
2014 mutex_unlock(&uuid_mutex);
2015 up_write(&sb->s_umount);
2016 }
2017 return ret;
2018 }
2019
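/*
 * Create the target device for a device replace operation. It gets the
 * reserved devid BTRFS_DEV_REPLACE_DEVID and is only put on dev_list,
 * not on alloc_list: it mirrors the source device and is not counted
 * as an independent rw device.
 */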
2020 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2021 struct btrfs_device **device_out)
2022 {
2023 struct request_queue *q;
2024 struct btrfs_device *device;
2025 struct block_device *bdev;
2026 struct btrfs_fs_info *fs_info = root->fs_info;
2027 struct list_head *devices;
2028 struct rcu_string *name;
2029 int ret = 0;
2030
2031 *device_out = NULL;
2032 if (fs_info->fs_devices->seeding)
2033 return -EINVAL;
2034
2035 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2036 fs_info->bdev_holder);
2037 if (IS_ERR(bdev))
2038 return PTR_ERR(bdev);
2039
2040 filemap_write_and_wait(bdev->bd_inode->i_mapping);
2041
2042 devices = &fs_info->fs_devices->devices;
2043 list_for_each_entry(device, devices, dev_list) {
2044 if (device->bdev == bdev) {
2045 ret = -EEXIST;
2046 goto error;
2047 }
2048 }
2049
2050 device = kzalloc(sizeof(*device), GFP_NOFS);
2051 if (!device) {
2052 ret = -ENOMEM;
2053 goto error;
2054 }
2055
2056 name = rcu_string_strdup(device_path, GFP_NOFS);
2057 if (!name) {
2058 kfree(device);
2059 ret = -ENOMEM;
2060 goto error;
2061 }
2062 rcu_assign_pointer(device->name, name);
2063
2064 q = bdev_get_queue(bdev);
2065 if (blk_queue_discard(q))
2066 device->can_discard = 1;
2067 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2068 device->writeable = 1;
2069 device->work.func = pending_bios_fn;
2070 generate_random_uuid(device->uuid);
2071 device->devid = BTRFS_DEV_REPLACE_DEVID;
2072 spin_lock_init(&device->io_lock);
2073 device->generation = 0;
2074 device->io_width = root->sectorsize;
2075 device->io_align = root->sectorsize;
2076 device->sector_size = root->sectorsize;
2077 device->total_bytes = i_size_read(bdev->bd_inode);
2078 device->disk_total_bytes = device->total_bytes;
2079 device->dev_root = fs_info->dev_root;
2080 device->bdev = bdev;
2081 device->in_fs_metadata = 1;
2082 device->is_tgtdev_for_dev_replace = 1;
2083 device->mode = FMODE_EXCL;
2084 set_blocksize(device->bdev, 4096);
2085 device->fs_devices = fs_info->fs_devices;
2086 list_add(&device->dev_list, &fs_info->fs_devices->devices);
2087 fs_info->fs_devices->num_devices++;
2088 fs_info->fs_devices->open_devices++;
2089 if (device->can_discard)
2090 fs_info->fs_devices->num_can_discard++;
2091 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2092
2093 *device_out = device;
2094 return ret;
2095
2096 error:
2097 blkdev_put(bdev, FMODE_EXCL);
2098 return ret;
2099 }
2100
2101 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2102 struct btrfs_device *tgtdev)
2103 {
2104 WARN_ON(fs_info->fs_devices->rw_devices == 0);
2105 tgtdev->io_width = fs_info->dev_root->sectorsize;
2106 tgtdev->io_align = fs_info->dev_root->sectorsize;
2107 tgtdev->sector_size = fs_info->dev_root->sectorsize;
2108 tgtdev->dev_root = fs_info->dev_root;
2109 tgtdev->in_fs_metadata = 1;
2110 }
2111
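/*
 * Write the in-memory state of @device back into its DEV_ITEM in the
 * chunk tree.
 */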
2112 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2113 struct btrfs_device *device)
2114 {
2115 int ret;
2116 struct btrfs_path *path;
2117 struct btrfs_root *root;
2118 struct btrfs_dev_item *dev_item;
2119 struct extent_buffer *leaf;
2120 struct btrfs_key key;
2121
2122 root = device->dev_root->fs_info->chunk_root;
2123
2124 path = btrfs_alloc_path();
2125 if (!path)
2126 return -ENOMEM;
2127
2128 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2129 key.type = BTRFS_DEV_ITEM_KEY;
2130 key.offset = device->devid;
2131
2132 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2133 if (ret < 0)
2134 goto out;
2135
2136 if (ret > 0) {
2137 ret = -ENOENT;
2138 goto out;
2139 }
2140
2141 leaf = path->nodes[0];
2142 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2143
2144 btrfs_set_device_id(leaf, dev_item, device->devid);
2145 btrfs_set_device_type(leaf, dev_item, device->type);
2146 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2147 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2148 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2149 btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
2150 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
2151 btrfs_mark_buffer_dirty(leaf);
2152
2153 out:
2154 btrfs_free_path(path);
2155 return ret;
2156 }
2157
2158 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
2159 struct btrfs_device *device, u64 new_size)
2160 {
2161 struct btrfs_super_block *super_copy =
2162 device->dev_root->fs_info->super_copy;
2163 u64 old_total = btrfs_super_total_bytes(super_copy);
2164 u64 diff = new_size - device->total_bytes;
2165
2166 if (!device->writeable)
2167 return -EACCES;
2168 if (new_size <= device->total_bytes ||
2169 device->is_tgtdev_for_dev_replace)
2170 return -EINVAL;
2171
2172 btrfs_set_super_total_bytes(super_copy, old_total + diff);
2173 device->fs_devices->total_rw_bytes += diff;
2174
2175 device->total_bytes = new_size;
2176 device->disk_total_bytes = new_size;
2177 btrfs_clear_space_info_full(device->dev_root->fs_info);
2178
2179 return btrfs_update_device(trans, device);
2180 }
2181
2182 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2183 struct btrfs_device *device, u64 new_size)
2184 {
2185 int ret;
2186 lock_chunks(device->dev_root);
2187 ret = __btrfs_grow_device(trans, device, new_size);
2188 unlock_chunks(device->dev_root);
2189 return ret;
2190 }
2191
2192 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2193 struct btrfs_root *root,
2194 u64 chunk_tree, u64 chunk_objectid,
2195 u64 chunk_offset)
2196 {
2197 int ret;
2198 struct btrfs_path *path;
2199 struct btrfs_key key;
2200
2201 root = root->fs_info->chunk_root;
2202 path = btrfs_alloc_path();
2203 if (!path)
2204 return -ENOMEM;
2205
2206 key.objectid = chunk_objectid;
2207 key.offset = chunk_offset;
2208 key.type = BTRFS_CHUNK_ITEM_KEY;
2209
2210 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2211 if (ret < 0)
2212 goto out;
2213 else if (ret > 0) { /* Logic error or corruption */
2214 btrfs_error(root->fs_info, -ENOENT,
2215 "Failed lookup while freeing chunk.");
2216 ret = -ENOENT;
2217 goto out;
2218 }
2219
2220 ret = btrfs_del_item(trans, root, path);
2221 if (ret < 0)
2222 btrfs_error(root->fs_info, ret,
2223 "Failed to delete chunk item.");
2224 out:
2225 btrfs_free_path(path);
2226 return ret;
2227 }
2228
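/*
 * Note on the layout walked below: super_copy->sys_chunk_array is a
 * packed sequence of (struct btrfs_disk_key, struct btrfs_chunk)
 * pairs, where each chunk item carries its inline stripe array.
 * Deleting an entry is a memmove() over the tail of the array plus
 * shrinking the recorded sys_array_size.
 */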
2229 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
2230 chunk_offset)
2231 {
2232 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2233 struct btrfs_disk_key *disk_key;
2234 struct btrfs_chunk *chunk;
2235 u8 *ptr;
2236 int ret = 0;
2237 u32 num_stripes;
2238 u32 array_size;
2239 u32 len = 0;
2240 u32 cur;
2241 struct btrfs_key key;
2242
2243 array_size = btrfs_super_sys_array_size(super_copy);
2244
2245 ptr = super_copy->sys_chunk_array;
2246 cur = 0;
2247
2248 while (cur < array_size) {
2249 disk_key = (struct btrfs_disk_key *)ptr;
2250 btrfs_disk_key_to_cpu(&key, disk_key);
2251
2252 len = sizeof(*disk_key);
2253
2254 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2255 chunk = (struct btrfs_chunk *)(ptr + len);
2256 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2257 len += btrfs_chunk_item_size(num_stripes);
2258 } else {
2259 ret = -EIO;
2260 break;
2261 }
2262 if (key.objectid == chunk_objectid &&
2263 key.offset == chunk_offset) {
2264 memmove(ptr, ptr + len, array_size - (cur + len));
2265 array_size -= len;
2266 btrfs_set_super_sys_array_size(super_copy, array_size);
2267 } else {
2268 ptr += len;
2269 cur += len;
2270 }
2271 }
2272 return ret;
2273 }
2274
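/*
 * Relocating a chunk is a two-step operation: first move every live
 * extent out of the block group, then (under the chunk mutex) drop
 * the device extents, the chunk item (plus its sys_chunk_array copy
 * for SYSTEM chunks), the block group and the in-memory mapping.
 */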
2275 static int btrfs_relocate_chunk(struct btrfs_root *root,
2276 u64 chunk_tree, u64 chunk_objectid,
2277 u64 chunk_offset)
2278 {
2279 struct extent_map_tree *em_tree;
2280 struct btrfs_root *extent_root;
2281 struct btrfs_trans_handle *trans;
2282 struct extent_map *em;
2283 struct map_lookup *map;
2284 int ret;
2285 int i;
2286
2287 root = root->fs_info->chunk_root;
2288 extent_root = root->fs_info->extent_root;
2289 em_tree = &root->fs_info->mapping_tree.map_tree;
2290
2291 ret = btrfs_can_relocate(extent_root, chunk_offset);
2292 if (ret)
2293 return -ENOSPC;
2294
2295 /* step one, relocate all the extents inside this chunk */
2296 ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2297 if (ret)
2298 return ret;
2299
2300 trans = btrfs_start_transaction(root, 0);
2301 BUG_ON(IS_ERR(trans));
2302
2303 lock_chunks(root);
2304
2305 /*
2306 * step two, delete the device extents and the
2307 * chunk tree entries
2308 */
2309 read_lock(&em_tree->lock);
2310 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2311 read_unlock(&em_tree->lock);
2312
2313 BUG_ON(!em || em->start > chunk_offset ||
2314 em->start + em->len < chunk_offset);
2315 map = (struct map_lookup *)em->bdev;
2316
2317 for (i = 0; i < map->num_stripes; i++) {
2318 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2319 map->stripes[i].physical);
2320 BUG_ON(ret);
2321
2322 if (map->stripes[i].dev) {
2323 ret = btrfs_update_device(trans, map->stripes[i].dev);
2324 BUG_ON(ret);
2325 }
2326 }
2327 ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2328 chunk_offset);
2329
2330 BUG_ON(ret);
2331
2332 trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2333
2334 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2335 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2336 BUG_ON(ret);
2337 }
2338
2339 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2340 BUG_ON(ret);
2341
2342 write_lock(&em_tree->lock);
2343 remove_extent_mapping(em_tree, em);
2344 write_unlock(&em_tree->lock);
2345
2346 kfree(map);
2347 em->bdev = NULL;
2348
2349 /* once for the tree */
2350 free_extent_map(em);
2351 /* once for us */
2352 free_extent_map(em);
2353
2354 unlock_chunks(root);
2355 btrfs_end_transaction(trans, root);
2356 return 0;
2357 }
2358
2359 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2360 {
2361 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2362 struct btrfs_path *path;
2363 struct extent_buffer *leaf;
2364 struct btrfs_chunk *chunk;
2365 struct btrfs_key key;
2366 struct btrfs_key found_key;
2367 u64 chunk_tree = chunk_root->root_key.objectid;
2368 u64 chunk_type;
2369 bool retried = false;
2370 int failed = 0;
2371 int ret;
2372
2373 path = btrfs_alloc_path();
2374 if (!path)
2375 return -ENOMEM;
2376
2377 again:
2378 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2379 key.offset = (u64)-1;
2380 key.type = BTRFS_CHUNK_ITEM_KEY;
2381
2382 while (1) {
2383 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2384 if (ret < 0)
2385 goto error;
2386 BUG_ON(ret == 0); /* Corruption */
2387
2388 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2389 key.type);
2390 if (ret < 0)
2391 goto error;
2392 if (ret > 0)
2393 break;
2394
2395 leaf = path->nodes[0];
2396 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2397
2398 chunk = btrfs_item_ptr(leaf, path->slots[0],
2399 struct btrfs_chunk);
2400 chunk_type = btrfs_chunk_type(leaf, chunk);
2401 btrfs_release_path(path);
2402
2403 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2404 ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2405 found_key.objectid,
2406 found_key.offset);
2407 if (ret == -ENOSPC)
2408 failed++;
2409 else if (ret)
2410 BUG();
2411 }
2412
2413 if (found_key.offset == 0)
2414 break;
2415 key.offset = found_key.offset - 1;
2416 }
2417 ret = 0;
2418 if (failed && !retried) {
2419 failed = 0;
2420 retried = true;
2421 goto again;
2422 } else if (failed && retried) {
2423 WARN_ON(1);
2424 ret = -ENOSPC;
2425 }
2426 error:
2427 btrfs_free_path(path);
2428 return ret;
2429 }
2430
2431 static int insert_balance_item(struct btrfs_root *root,
2432 struct btrfs_balance_control *bctl)
2433 {
2434 struct btrfs_trans_handle *trans;
2435 struct btrfs_balance_item *item;
2436 struct btrfs_disk_balance_args disk_bargs;
2437 struct btrfs_path *path;
2438 struct extent_buffer *leaf;
2439 struct btrfs_key key;
2440 int ret, err;
2441
2442 path = btrfs_alloc_path();
2443 if (!path)
2444 return -ENOMEM;
2445
2446 trans = btrfs_start_transaction(root, 0);
2447 if (IS_ERR(trans)) {
2448 btrfs_free_path(path);
2449 return PTR_ERR(trans);
2450 }
2451
2452 key.objectid = BTRFS_BALANCE_OBJECTID;
2453 key.type = BTRFS_BALANCE_ITEM_KEY;
2454 key.offset = 0;
2455
2456 ret = btrfs_insert_empty_item(trans, root, path, &key,
2457 sizeof(*item));
2458 if (ret)
2459 goto out;
2460
2461 leaf = path->nodes[0];
2462 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2463
2464 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2465
2466 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2467 btrfs_set_balance_data(leaf, item, &disk_bargs);
2468 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2469 btrfs_set_balance_meta(leaf, item, &disk_bargs);
2470 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2471 btrfs_set_balance_sys(leaf, item, &disk_bargs);
2472
2473 btrfs_set_balance_flags(leaf, item, bctl->flags);
2474
2475 btrfs_mark_buffer_dirty(leaf);
2476 out:
2477 btrfs_free_path(path);
2478 err = btrfs_commit_transaction(trans, root);
2479 if (err && !ret)
2480 ret = err;
2481 return ret;
2482 }
2483
2484 static int del_balance_item(struct btrfs_root *root)
2485 {
2486 struct btrfs_trans_handle *trans;
2487 struct btrfs_path *path;
2488 struct btrfs_key key;
2489 int ret, err;
2490
2491 path = btrfs_alloc_path();
2492 if (!path)
2493 return -ENOMEM;
2494
2495 trans = btrfs_start_transaction(root, 0);
2496 if (IS_ERR(trans)) {
2497 btrfs_free_path(path);
2498 return PTR_ERR(trans);
2499 }
2500
2501 key.objectid = BTRFS_BALANCE_OBJECTID;
2502 key.type = BTRFS_BALANCE_ITEM_KEY;
2503 key.offset = 0;
2504
2505 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2506 if (ret < 0)
2507 goto out;
2508 if (ret > 0) {
2509 ret = -ENOENT;
2510 goto out;
2511 }
2512
2513 ret = btrfs_del_item(trans, root, path);
2514 out:
2515 btrfs_free_path(path);
2516 err = btrfs_commit_transaction(trans, root);
2517 if (err && !ret)
2518 ret = err;
2519 return ret;
2520 }
2521
2522 /*
2523 * This is a heuristic used to reduce the number of chunks balanced on
2524 * resume after balance was interrupted.
2525 */
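/*
 * For example (illustrative values, not from the source): a balance
 * started with a data convert to raid1 that was interrupted resumes
 * with BTRFS_BALANCE_ARGS_SOFT set, so data chunks already converted
 * to raid1 are skipped; a plain balance resumes with usage=90,
 * skipping chunks that are already at least 90% full.
 */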
2526 static void update_balance_args(struct btrfs_balance_control *bctl)
2527 {
2528 /*
2529 * Turn on soft mode for chunk types that were being converted.
2530 */
2531 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2532 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2533 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2534 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2535 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2536 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2537
2538 /*
2539 * Turn on usage filter if it is not already used. The idea is

2540 * that chunks that we have already balanced should be
2541 * reasonably full. Don't do it for chunks that are being
2542 * converted - that will keep us from relocating unconverted
2543 * (albeit full) chunks.
2544 */
2545 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2546 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2547 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2548 bctl->data.usage = 90;
2549 }
2550 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2551 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2552 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2553 bctl->sys.usage = 90;
2554 }
2555 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2556 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2557 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2558 bctl->meta.usage = 90;
2559 }
2560 }
2561
2562 /*
2563 * Should be called with both balance and volume mutexes held to
2564 * serialize other volume operations (add_dev/rm_dev/resize) with
2565 * restriper. Same goes for unset_balance_control.
2566 */
2567 static void set_balance_control(struct btrfs_balance_control *bctl)
2568 {
2569 struct btrfs_fs_info *fs_info = bctl->fs_info;
2570
2571 BUG_ON(fs_info->balance_ctl);
2572
2573 spin_lock(&fs_info->balance_lock);
2574 fs_info->balance_ctl = bctl;
2575 spin_unlock(&fs_info->balance_lock);
2576 }
2577
2578 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2579 {
2580 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2581
2582 BUG_ON(!fs_info->balance_ctl);
2583
2584 spin_lock(&fs_info->balance_lock);
2585 fs_info->balance_ctl = NULL;
2586 spin_unlock(&fs_info->balance_lock);
2587
2588 kfree(bctl);
2589 }
2590
2591 /*
2592 * Balance filters. Return 1 if chunk should be filtered out
2593 * (should not be balanced).
2594 */
2595 static int chunk_profiles_filter(u64 chunk_type,
2596 struct btrfs_balance_args *bargs)
2597 {
2598 chunk_type = chunk_to_extended(chunk_type) &
2599 BTRFS_EXTENDED_PROFILE_MASK;
2600
2601 if (bargs->profiles & chunk_type)
2602 return 0;
2603
2604 return 1;
2605 }
2606
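/*
 * Usage filter: assuming div_factor_fine() keeps its usual
 * num * factor / 100 semantics, a chunk stays balanceable only while
 * its used bytes are below usage percent of its size; e.g. with
 * usage=90 a 1GiB chunk is skipped once more than ~921MiB of it is
 * in use.
 */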
2607 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2608 struct btrfs_balance_args *bargs)
2609 {
2610 struct btrfs_block_group_cache *cache;
2611 u64 chunk_used, user_thresh;
2612 int ret = 1;
2613
2614 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2615 chunk_used = btrfs_block_group_used(&cache->item);
2616
2617 user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
2618 if (chunk_used < user_thresh)
2619 ret = 0;
2620
2621 btrfs_put_block_group(cache);
2622 return ret;
2623 }
2624
2625 static int chunk_devid_filter(struct extent_buffer *leaf,
2626 struct btrfs_chunk *chunk,
2627 struct btrfs_balance_args *bargs)
2628 {
2629 struct btrfs_stripe *stripe;
2630 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2631 int i;
2632
2633 for (i = 0; i < num_stripes; i++) {
2634 stripe = btrfs_stripe_nr(chunk, i);
2635 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2636 return 0;
2637 }
2638
2639 return 1;
2640 }
2641
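/*
 * Physical range filter: a chunk passes if any of its stripes on
 * bargs->devid overlaps [pstart, pend). The factor computation below
 * accounts for DUP/RAID1/RAID10 keeping two copies: each device
 * extent then covers chunk_length / (num_stripes / 2) bytes, e.g.
 * half the chunk length for a four-stripe RAID10.
 */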
2642 /* [pstart, pend) */
2643 static int chunk_drange_filter(struct extent_buffer *leaf,
2644 struct btrfs_chunk *chunk,
2645 u64 chunk_offset,
2646 struct btrfs_balance_args *bargs)
2647 {
2648 struct btrfs_stripe *stripe;
2649 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2650 u64 stripe_offset;
2651 u64 stripe_length;
2652 int factor;
2653 int i;
2654
2655 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2656 return 0;
2657
2658 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2659 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
2660 factor = 2;
2661 else
2662 factor = 1;
2663 factor = num_stripes / factor;
2664
2665 for (i = 0; i < num_stripes; i++) {
2666 stripe = btrfs_stripe_nr(chunk, i);
2667 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2668 continue;
2669
2670 stripe_offset = btrfs_stripe_offset(leaf, stripe);
2671 stripe_length = btrfs_chunk_length(leaf, chunk);
2672 do_div(stripe_length, factor);
2673
2674 if (stripe_offset < bargs->pend &&
2675 stripe_offset + stripe_length > bargs->pstart)
2676 return 0;
2677 }
2678
2679 return 1;
2680 }
2681
2682 /* [vstart, vend) */
2683 static int chunk_vrange_filter(struct extent_buffer *leaf,
2684 struct btrfs_chunk *chunk,
2685 u64 chunk_offset,
2686 struct btrfs_balance_args *bargs)
2687 {
2688 if (chunk_offset < bargs->vend &&
2689 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2690 /* at least part of the chunk is inside this vrange */
2691 return 0;
2692
2693 return 1;
2694 }
2695
2696 static int chunk_soft_convert_filter(u64 chunk_type,
2697 struct btrfs_balance_args *bargs)
2698 {
2699 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2700 return 0;
2701
2702 chunk_type = chunk_to_extended(chunk_type) &
2703 BTRFS_EXTENDED_PROFILE_MASK;
2704
2705 if (bargs->target == chunk_type)
2706 return 1;
2707
2708 return 0;
2709 }
2710
2711 static int should_balance_chunk(struct btrfs_root *root,
2712 struct extent_buffer *leaf,
2713 struct btrfs_chunk *chunk, u64 chunk_offset)
2714 {
2715 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2716 struct btrfs_balance_args *bargs = NULL;
2717 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2718
2719 /* type filter */
2720 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2721 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2722 return 0;
2723 }
2724
2725 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2726 bargs = &bctl->data;
2727 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2728 bargs = &bctl->sys;
2729 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2730 bargs = &bctl->meta;
2731
2732 /* profiles filter */
2733 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2734 chunk_profiles_filter(chunk_type, bargs)) {
2735 return 0;
2736 }
2737
2738 /* usage filter */
2739 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2740 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2741 return 0;
2742 }
2743
2744 /* devid filter */
2745 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2746 chunk_devid_filter(leaf, chunk, bargs)) {
2747 return 0;
2748 }
2749
2750 /* drange filter, makes sense only with devid filter */
2751 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2752 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2753 return 0;
2754 }
2755
2756 /* vrange filter */
2757 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2758 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2759 return 0;
2760 }
2761
2762 /* soft profile changing mode */
2763 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2764 chunk_soft_convert_filter(chunk_type, bargs)) {
2765 return 0;
2766 }
2767
2768 return 1;
2769 }
2770
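/*
 * The chunk loop below runs twice: a first pass with counting == true
 * that only fills bctl->stat.expected, and a second pass that
 * actually relocates chunks. Cancel requests are honoured between
 * chunks in either pass; pause requests only once the counting pass
 * is done.
 */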
2771 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2772 {
2773 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2774 struct btrfs_root *chunk_root = fs_info->chunk_root;
2775 struct btrfs_root *dev_root = fs_info->dev_root;
2776 struct list_head *devices;
2777 struct btrfs_device *device;
2778 u64 old_size;
2779 u64 size_to_free;
2780 struct btrfs_chunk *chunk;
2781 struct btrfs_path *path;
2782 struct btrfs_key key;
2783 struct btrfs_key found_key;
2784 struct btrfs_trans_handle *trans;
2785 struct extent_buffer *leaf;
2786 int slot;
2787 int ret;
2788 int enospc_errors = 0;
2789 bool counting = true;
2790
2791 /* step one, make some room on all the devices */
2792 devices = &fs_info->fs_devices->devices;
2793 list_for_each_entry(device, devices, dev_list) {
2794 old_size = device->total_bytes;
2795 size_to_free = div_factor(old_size, 1);
2796 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2797 if (!device->writeable ||
2798 device->total_bytes - device->bytes_used > size_to_free ||
2799 device->is_tgtdev_for_dev_replace)
2800 continue;
2801
2802 ret = btrfs_shrink_device(device, old_size - size_to_free);
2803 if (ret == -ENOSPC)
2804 break;
2805 BUG_ON(ret);
2806
2807 trans = btrfs_start_transaction(dev_root, 0);
2808 BUG_ON(IS_ERR(trans));
2809
2810 ret = btrfs_grow_device(trans, device, old_size);
2811 BUG_ON(ret);
2812
2813 btrfs_end_transaction(trans, dev_root);
2814 }
2815
2816 /* step two, relocate all the chunks */
2817 path = btrfs_alloc_path();
2818 if (!path) {
2819 ret = -ENOMEM;
2820 goto error;
2821 }
2822
2823 /* zero out stat counters */
2824 spin_lock(&fs_info->balance_lock);
2825 memset(&bctl->stat, 0, sizeof(bctl->stat));
2826 spin_unlock(&fs_info->balance_lock);
2827 again:
2828 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2829 key.offset = (u64)-1;
2830 key.type = BTRFS_CHUNK_ITEM_KEY;
2831
2832 while (1) {
2833 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2834 atomic_read(&fs_info->balance_cancel_req)) {
2835 ret = -ECANCELED;
2836 goto error;
2837 }
2838
2839 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2840 if (ret < 0)
2841 goto error;
2842
2843 /*
2844 * this shouldn't happen, it means the last relocate
2845 * failed
2846 */
2847 if (ret == 0)
2848 BUG(); /* FIXME break ? */
2849
2850 ret = btrfs_previous_item(chunk_root, path, 0,
2851 BTRFS_CHUNK_ITEM_KEY);
2852 if (ret) {
2853 ret = 0;
2854 break;
2855 }
2856
2857 leaf = path->nodes[0];
2858 slot = path->slots[0];
2859 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2860
2861 if (found_key.objectid != key.objectid)
2862 break;
2863
2864 /* chunk zero is special */
2865 if (found_key.offset == 0)
2866 break;
2867
2868 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2869
2870 if (!counting) {
2871 spin_lock(&fs_info->balance_lock);
2872 bctl->stat.considered++;
2873 spin_unlock(&fs_info->balance_lock);
2874 }
2875
2876 ret = should_balance_chunk(chunk_root, leaf, chunk,
2877 found_key.offset);
2878 btrfs_release_path(path);
2879 if (!ret)
2880 goto loop;
2881
2882 if (counting) {
2883 spin_lock(&fs_info->balance_lock);
2884 bctl->stat.expected++;
2885 spin_unlock(&fs_info->balance_lock);
2886 goto loop;
2887 }
2888
2889 ret = btrfs_relocate_chunk(chunk_root,
2890 chunk_root->root_key.objectid,
2891 found_key.objectid,
2892 found_key.offset);
2893 if (ret && ret != -ENOSPC)
2894 goto error;
2895 if (ret == -ENOSPC) {
2896 enospc_errors++;
2897 } else {
2898 spin_lock(&fs_info->balance_lock);
2899 bctl->stat.completed++;
2900 spin_unlock(&fs_info->balance_lock);
2901 }
2902 loop:
2903 key.offset = found_key.offset - 1;
2904 }
2905
2906 if (counting) {
2907 btrfs_release_path(path);
2908 counting = false;
2909 goto again;
2910 }
2911 error:
2912 btrfs_free_path(path);
2913 if (enospc_errors) {
2914 printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
2915 enospc_errors);
2916 if (!ret)
2917 ret = -ENOSPC;
2918 }
2919
2920 return ret;
2921 }
2922
2923 /**
2924 * alloc_profile_is_valid - see if a given profile is valid and reduced
2925 * @flags: profile to validate
2926 * @extended: if true @flags is treated as an extended profile
2927 */
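/*
 * Example: BTRFS_BLOCK_GROUP_RAID1 alone is valid (one profile bit);
 * RAID0|RAID1 is rejected because (flags & (flags - 1)) is non-zero
 * whenever more than one bit is set.
 */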
2928 static int alloc_profile_is_valid(u64 flags, int extended)
2929 {
2930 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
2931 BTRFS_BLOCK_GROUP_PROFILE_MASK);
2932
2933 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
2934
2935 /* 1) check that all other bits are zeroed */
2936 if (flags & ~mask)
2937 return 0;
2938
2939 /* 2) see if profile is reduced */
2940 if (flags == 0)
2941 return !extended; /* "0" is valid for usual profiles */
2942
2943 /* true if exactly one bit set */
2944 return (flags & (flags - 1)) == 0;
2945 }
2946
2947 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2948 {
2949 /* cancel requested || normal exit path */
2950 return atomic_read(&fs_info->balance_cancel_req) ||
2951 (atomic_read(&fs_info->balance_pause_req) == 0 &&
2952 atomic_read(&fs_info->balance_cancel_req) == 0);
2953 }
2954
2955 static void __cancel_balance(struct btrfs_fs_info *fs_info)
2956 {
2957 int ret;
2958
2959 unset_balance_control(fs_info);
2960 ret = del_balance_item(fs_info->tree_root);
2961 BUG_ON(ret);
2962 }
2963
2964 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
2965 struct btrfs_ioctl_balance_args *bargs);
2966
2967 /*
2968 * Should be called with both balance and volume mutexes held
2969 */
2970 int btrfs_balance(struct btrfs_balance_control *bctl,
2971 struct btrfs_ioctl_balance_args *bargs)
2972 {
2973 struct btrfs_fs_info *fs_info = bctl->fs_info;
2974 u64 allowed;
2975 int mixed = 0;
2976 int ret;
2977 u64 num_devices;
2978
2979 if (btrfs_fs_closing(fs_info) ||
2980 atomic_read(&fs_info->balance_pause_req) ||
2981 atomic_read(&fs_info->balance_cancel_req)) {
2982 ret = -EINVAL;
2983 goto out;
2984 }
2985
2986 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2987 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
2988 mixed = 1;
2989
2990 /*
2991 * In case of mixed groups, both data and meta should be picked,
2992 * and identical options should be given for both of them.
2993 */
2994 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
2995 if (mixed && (bctl->flags & allowed)) {
2996 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
2997 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
2998 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
2999 printk(KERN_ERR "btrfs: with mixed groups data and "
3000 "metadata balance options must be the same\n");
3001 ret = -EINVAL;
3002 goto out;
3003 }
3004 }
3005
3006 num_devices = fs_info->fs_devices->num_devices;
3007 btrfs_dev_replace_lock(&fs_info->dev_replace);
3008 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3009 BUG_ON(num_devices < 1);
3010 num_devices--;
3011 }
3012 btrfs_dev_replace_unlock(&fs_info->dev_replace);
3013 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3014 if (num_devices == 1)
3015 allowed |= BTRFS_BLOCK_GROUP_DUP;
3016 else if (num_devices < 4)
3017 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3018 else
3019 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
3020 BTRFS_BLOCK_GROUP_RAID10);
3021
3022 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3023 (!alloc_profile_is_valid(bctl->data.target, 1) ||
3024 (bctl->data.target & ~allowed))) {
3025 printk(KERN_ERR "btrfs: unable to start balance with target "
3026 "data profile %llu\n",
3027 (unsigned long long)bctl->data.target);
3028 ret = -EINVAL;
3029 goto out;
3030 }
3031 if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3032 (!alloc_profile_is_valid(bctl->meta.target, 1) ||
3033 (bctl->meta.target & ~allowed))) {
3034 printk(KERN_ERR "btrfs: unable to start balance with target "
3035 "metadata profile %llu\n",
3036 (unsigned long long)bctl->meta.target);
3037 ret = -EINVAL;
3038 goto out;
3039 }
3040 if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3041 (!alloc_profile_is_valid(bctl->sys.target, 1) ||
3042 (bctl->sys.target & ~allowed))) {
3043 printk(KERN_ERR "btrfs: unable to start balance with target "
3044 "system profile %llu\n",
3045 (unsigned long long)bctl->sys.target);
3046 ret = -EINVAL;
3047 goto out;
3048 }
3049
3050 /* allow dup'ed data chunks only in mixed mode */
3051 if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3052 (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
3053 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
3054 ret = -EINVAL;
3055 goto out;
3056 }
3057
3058 /* allow reducing meta or sys integrity only if force is set */
3059 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3060 BTRFS_BLOCK_GROUP_RAID10;
3061 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3062 (fs_info->avail_system_alloc_bits & allowed) &&
3063 !(bctl->sys.target & allowed)) ||
3064 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3065 (fs_info->avail_metadata_alloc_bits & allowed) &&
3066 !(bctl->meta.target & allowed))) {
3067 if (bctl->flags & BTRFS_BALANCE_FORCE) {
3068 printk(KERN_INFO "btrfs: force reducing metadata "
3069 "integrity\n");
3070 } else {
3071 printk(KERN_ERR "btrfs: balance will reduce metadata "
3072 "integrity, use force if you want this\n");
3073 ret = -EINVAL;
3074 goto out;
3075 }
3076 }
3077
3078 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3079 int num_tolerated_disk_barrier_failures;
3080 u64 target = bctl->sys.target;
3081
3082 num_tolerated_disk_barrier_failures =
3083 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3084 if (num_tolerated_disk_barrier_failures > 0 &&
3085 (target &
3086 (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3087 BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
3088 num_tolerated_disk_barrier_failures = 0;
3089 else if (num_tolerated_disk_barrier_failures > 1 &&
3090 (target &
3091 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
3092 num_tolerated_disk_barrier_failures = 1;
3093
3094 fs_info->num_tolerated_disk_barrier_failures =
3095 num_tolerated_disk_barrier_failures;
3096 }
3097
3098 ret = insert_balance_item(fs_info->tree_root, bctl);
3099 if (ret && ret != -EEXIST)
3100 goto out;
3101
3102 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3103 BUG_ON(ret == -EEXIST);
3104 set_balance_control(bctl);
3105 } else {
3106 BUG_ON(ret != -EEXIST);
3107 spin_lock(&fs_info->balance_lock);
3108 update_balance_args(bctl);
3109 spin_unlock(&fs_info->balance_lock);
3110 }
3111
3112 atomic_inc(&fs_info->balance_running);
3113 mutex_unlock(&fs_info->balance_mutex);
3114
3115 ret = __btrfs_balance(fs_info);
3116
3117 mutex_lock(&fs_info->balance_mutex);
3118 atomic_dec(&fs_info->balance_running);
3119
3120 if (bargs) {
3121 memset(bargs, 0, sizeof(*bargs));
3122 update_ioctl_balance_args(fs_info, 0, bargs);
3123 }
3124
3125 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3126 balance_need_close(fs_info)) {
3127 __cancel_balance(fs_info);
3128 }
3129
3130 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3131 fs_info->num_tolerated_disk_barrier_failures =
3132 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3133 }
3134
3135 wake_up(&fs_info->balance_wait_q);
3136
3137 return ret;
3138 out:
3139 if (bctl->flags & BTRFS_BALANCE_RESUME)
3140 __cancel_balance(fs_info);
3141 else
3142 kfree(bctl);
3143 return ret;
3144 }
3145
3146 static int balance_kthread(void *data)
3147 {
3148 struct btrfs_fs_info *fs_info = data;
3149 int ret = 0;
3150
3151 mutex_lock(&fs_info->volume_mutex);
3152 mutex_lock(&fs_info->balance_mutex);
3153
3154 if (fs_info->balance_ctl) {
3155 printk(KERN_INFO "btrfs: continuing balance\n");
3156 ret = btrfs_balance(fs_info->balance_ctl, NULL);
3157 }
3158
3159 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3160 mutex_unlock(&fs_info->balance_mutex);
3161 mutex_unlock(&fs_info->volume_mutex);
3162
3163 return ret;
3164 }
3165
3166 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3167 {
3168 struct task_struct *tsk;
3169
3170 spin_lock(&fs_info->balance_lock);
3171 if (!fs_info->balance_ctl) {
3172 spin_unlock(&fs_info->balance_lock);
3173 return 0;
3174 }
3175 spin_unlock(&fs_info->balance_lock);
3176
3177 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3178 printk(KERN_INFO "btrfs: force skipping balance\n");
3179 return 0;
3180 }
3181
3182 WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3183 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3184 if (IS_ERR(tsk))
3185 return PTR_ERR(tsk);
3186
3187 return 0;
3188 }
3189
3190 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3191 {
3192 struct btrfs_balance_control *bctl;
3193 struct btrfs_balance_item *item;
3194 struct btrfs_disk_balance_args disk_bargs;
3195 struct btrfs_path *path;
3196 struct extent_buffer *leaf;
3197 struct btrfs_key key;
3198 int ret;
3199
3200 path = btrfs_alloc_path();
3201 if (!path)
3202 return -ENOMEM;
3203
3204 key.objectid = BTRFS_BALANCE_OBJECTID;
3205 key.type = BTRFS_BALANCE_ITEM_KEY;
3206 key.offset = 0;
3207
3208 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3209 if (ret < 0)
3210 goto out;
3211 if (ret > 0) { /* ret = -ENOENT; */
3212 ret = 0;
3213 goto out;
3214 }
3215
3216 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3217 if (!bctl) {
3218 ret = -ENOMEM;
3219 goto out;
3220 }
3221
3222 leaf = path->nodes[0];
3223 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3224
3225 bctl->fs_info = fs_info;
3226 bctl->flags = btrfs_balance_flags(leaf, item);
3227 bctl->flags |= BTRFS_BALANCE_RESUME;
3228
3229 btrfs_balance_data(leaf, item, &disk_bargs);
3230 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3231 btrfs_balance_meta(leaf, item, &disk_bargs);
3232 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3233 btrfs_balance_sys(leaf, item, &disk_bargs);
3234 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3235
3236 mutex_lock(&fs_info->volume_mutex);
3237 mutex_lock(&fs_info->balance_mutex);
3238
3239 set_balance_control(bctl);
3240
3241 mutex_unlock(&fs_info->balance_mutex);
3242 mutex_unlock(&fs_info->volume_mutex);
3243 out:
3244 btrfs_free_path(path);
3245 return ret;
3246 }
3247
3248 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3249 {
3250 int ret = 0;
3251
3252 mutex_lock(&fs_info->balance_mutex);
3253 if (!fs_info->balance_ctl) {
3254 mutex_unlock(&fs_info->balance_mutex);
3255 return -ENOTCONN;
3256 }
3257
3258 if (atomic_read(&fs_info->balance_running)) {
3259 atomic_inc(&fs_info->balance_pause_req);
3260 mutex_unlock(&fs_info->balance_mutex);
3261
3262 wait_event(fs_info->balance_wait_q,
3263 atomic_read(&fs_info->balance_running) == 0);
3264
3265 mutex_lock(&fs_info->balance_mutex);
3266 /* we are good with balance_ctl ripped off from under us */
3267 BUG_ON(atomic_read(&fs_info->balance_running));
3268 atomic_dec(&fs_info->balance_pause_req);
3269 } else {
3270 ret = -ENOTCONN;
3271 }
3272
3273 mutex_unlock(&fs_info->balance_mutex);
3274 return ret;
3275 }
3276
3277 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3278 {
3279 mutex_lock(&fs_info->balance_mutex);
3280 if (!fs_info->balance_ctl) {
3281 mutex_unlock(&fs_info->balance_mutex);
3282 return -ENOTCONN;
3283 }
3284
3285 atomic_inc(&fs_info->balance_cancel_req);
3286 /*
3287 * if we are running, just wait and return; the balance item is
3288 * deleted in btrfs_balance in this case
3289 */
3290 if (atomic_read(&fs_info->balance_running)) {
3291 mutex_unlock(&fs_info->balance_mutex);
3292 wait_event(fs_info->balance_wait_q,
3293 atomic_read(&fs_info->balance_running) == 0);
3294 mutex_lock(&fs_info->balance_mutex);
3295 } else {
3296 /* __cancel_balance needs volume_mutex */
3297 mutex_unlock(&fs_info->balance_mutex);
3298 mutex_lock(&fs_info->volume_mutex);
3299 mutex_lock(&fs_info->balance_mutex);
3300
3301 if (fs_info->balance_ctl)
3302 __cancel_balance(fs_info);
3303
3304 mutex_unlock(&fs_info->volume_mutex);
3305 }
3306
3307 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3308 atomic_dec(&fs_info->balance_cancel_req);
3309 mutex_unlock(&fs_info->balance_mutex);
3310 return 0;
3311 }
3312
3313 /*
3314 * shrinking a device means finding all of the device extents past
3315 * the new size, and then following the back refs to the chunks.
3316 * The chunk relocation code actually frees the device extents.
3317 */
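/*
 * Relocation can transiently fail with -ENOSPC, so extents that could
 * not be moved are retried once more after everything else has been
 * relocated; if the retry still fails, the old size is restored and
 * -ENOSPC is returned.
 */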
3318 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3319 {
3320 struct btrfs_trans_handle *trans;
3321 struct btrfs_root *root = device->dev_root;
3322 struct btrfs_dev_extent *dev_extent = NULL;
3323 struct btrfs_path *path;
3324 u64 length;
3325 u64 chunk_tree;
3326 u64 chunk_objectid;
3327 u64 chunk_offset;
3328 int ret;
3329 int slot;
3330 int failed = 0;
3331 bool retried = false;
3332 struct extent_buffer *l;
3333 struct btrfs_key key;
3334 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3335 u64 old_total = btrfs_super_total_bytes(super_copy);
3336 u64 old_size = device->total_bytes;
3337 u64 diff = device->total_bytes - new_size;
3338
3339 if (device->is_tgtdev_for_dev_replace)
3340 return -EINVAL;
3341
3342 path = btrfs_alloc_path();
3343 if (!path)
3344 return -ENOMEM;
3345
3346 path->reada = 2;
3347
3348 lock_chunks(root);
3349
3350 device->total_bytes = new_size;
3351 if (device->writeable) {
3352 device->fs_devices->total_rw_bytes -= diff;
3353 spin_lock(&root->fs_info->free_chunk_lock);
3354 root->fs_info->free_chunk_space -= diff;
3355 spin_unlock(&root->fs_info->free_chunk_lock);
3356 }
3357 unlock_chunks(root);
3358
3359 again:
3360 key.objectid = device->devid;
3361 key.offset = (u64)-1;
3362 key.type = BTRFS_DEV_EXTENT_KEY;
3363
3364 do {
3365 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3366 if (ret < 0)
3367 goto done;
3368
3369 ret = btrfs_previous_item(root, path, 0, key.type);
3370 if (ret < 0)
3371 goto done;
3372 if (ret) {
3373 ret = 0;
3374 btrfs_release_path(path);
3375 break;
3376 }
3377
3378 l = path->nodes[0];
3379 slot = path->slots[0];
3380 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3381
3382 if (key.objectid != device->devid) {
3383 btrfs_release_path(path);
3384 break;
3385 }
3386
3387 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3388 length = btrfs_dev_extent_length(l, dev_extent);
3389
3390 if (key.offset + length <= new_size) {
3391 btrfs_release_path(path);
3392 break;
3393 }
3394
3395 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3396 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3397 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3398 btrfs_release_path(path);
3399
3400 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3401 chunk_offset);
3402 if (ret && ret != -ENOSPC)
3403 goto done;
3404 if (ret == -ENOSPC)
3405 failed++;
3406 } while (key.offset-- > 0);
3407
3408 if (failed && !retried) {
3409 failed = 0;
3410 retried = true;
3411 goto again;
3412 } else if (failed && retried) {
3413 ret = -ENOSPC;
3414 lock_chunks(root);
3415
3416 device->total_bytes = old_size;
3417 if (device->writeable)
3418 device->fs_devices->total_rw_bytes += diff;
3419 spin_lock(&root->fs_info->free_chunk_lock);
3420 root->fs_info->free_chunk_space += diff;
3421 spin_unlock(&root->fs_info->free_chunk_lock);
3422 unlock_chunks(root);
3423 goto done;
3424 }
3425
3426 /* Shrinking succeeded, else we would be at "done". */
3427 trans = btrfs_start_transaction(root, 0);
3428 if (IS_ERR(trans)) {
3429 ret = PTR_ERR(trans);
3430 goto done;
3431 }
3432
3433 lock_chunks(root);
3434
3435 device->disk_total_bytes = new_size;
3436 /* Now btrfs_update_device() will change the on-disk size. */
3437 ret = btrfs_update_device(trans, device);
3438 if (ret) {
3439 unlock_chunks(root);
3440 btrfs_end_transaction(trans, root);
3441 goto done;
3442 }
3443 WARN_ON(diff > old_total);
3444 btrfs_set_super_total_bytes(super_copy, old_total - diff);
3445 unlock_chunks(root);
3446 btrfs_end_transaction(trans, root);
3447 done:
3448 btrfs_free_path(path);
3449 return ret;
3450 }
3451
3452 static int btrfs_add_system_chunk(struct btrfs_root *root,
3453 struct btrfs_key *key,
3454 struct btrfs_chunk *chunk, int item_size)
3455 {
3456 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3457 struct btrfs_disk_key disk_key;
3458 u32 array_size;
3459 u8 *ptr;
3460
3461 array_size = btrfs_super_sys_array_size(super_copy);
3462 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3463 return -EFBIG;
3464
3465 ptr = super_copy->sys_chunk_array + array_size;
3466 btrfs_cpu_key_to_disk(&disk_key, key);
3467 memcpy(ptr, &disk_key, sizeof(disk_key));
3468 ptr += sizeof(disk_key);
3469 memcpy(ptr, chunk, item_size);
3470 item_size += sizeof(disk_key);
3471 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3472 return 0;
3473 }
3474
3475 /*
3476 * sort the devices in descending order by max_avail, total_avail
3477 */
3478 static int btrfs_cmp_device_info(const void *a, const void *b)
3479 {
3480 const struct btrfs_device_info *di_a = a;
3481 const struct btrfs_device_info *di_b = b;
3482
3483 if (di_a->max_avail > di_b->max_avail)
3484 return -1;
3485 if (di_a->max_avail < di_b->max_avail)
3486 return 1;
3487 if (di_a->total_avail > di_b->total_avail)
3488 return -1;
3489 if (di_a->total_avail < di_b->total_avail)
3490 return 1;
3491 return 0;
3492 }
3493
3494 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3495 struct btrfs_root *extent_root,
3496 struct map_lookup **map_ret,
3497 u64 *num_bytes_out, u64 *stripe_size_out,
3498 u64 start, u64 type)
3499 {
3500 struct btrfs_fs_info *info = extent_root->fs_info;
3501 struct btrfs_fs_devices *fs_devices = info->fs_devices;
3502 struct list_head *cur;
3503 struct map_lookup *map = NULL;
3504 struct extent_map_tree *em_tree;
3505 struct extent_map *em;
3506 struct btrfs_device_info *devices_info = NULL;
3507 u64 total_avail;
3508 int num_stripes; /* total number of stripes to allocate */
3509 int sub_stripes; /* sub_stripes info for map */
3510 int dev_stripes; /* stripes per dev */
3511 int devs_max; /* max devs to use */
3512 int devs_min; /* min devs needed */
3513 int devs_increment; /* ndevs has to be a multiple of this */
3514 int ncopies; /* how many copies the data has */
3515 int ret;
3516 u64 max_stripe_size;
3517 u64 max_chunk_size;
3518 u64 stripe_size;
3519 u64 num_bytes;
3520 int ndevs;
3521 int i;
3522 int j;
3523
3524 BUG_ON(!alloc_profile_is_valid(type, 0));
3525
3526 if (list_empty(&fs_devices->alloc_list))
3527 return -ENOSPC;
3528
3529 sub_stripes = 1;
3530 dev_stripes = 1;
3531 devs_increment = 1;
3532 ncopies = 1;
3533 devs_max = 0; /* 0 == as many as possible */
3534 devs_min = 1;
3535
3536 /*
3537 * define the properties of each RAID type.
3538 * FIXME: move this to a global table and use it in all RAID
3539 * calculation code
3540 */
3541 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
3542 dev_stripes = 2;
3543 ncopies = 2;
3544 devs_max = 1;
3545 } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
3546 devs_min = 2;
3547 } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
3548 devs_increment = 2;
3549 ncopies = 2;
3550 devs_max = 2;
3551 devs_min = 2;
3552 } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
3553 sub_stripes = 2;
3554 devs_increment = 2;
3555 ncopies = 2;
3556 devs_min = 4;
3557 } else {
3558 devs_max = 1;
3559 }
3560
3561 if (type & BTRFS_BLOCK_GROUP_DATA) {
3562 max_stripe_size = 1024 * 1024 * 1024;
3563 max_chunk_size = 10 * max_stripe_size;
3564 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3565 /* for larger filesystems, use larger metadata chunks */
3566 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3567 max_stripe_size = 1024 * 1024 * 1024;
3568 else
3569 max_stripe_size = 256 * 1024 * 1024;
3570 max_chunk_size = max_stripe_size;
3571 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3572 max_stripe_size = 32 * 1024 * 1024;
3573 max_chunk_size = 2 * max_stripe_size;
3574 } else {
3575 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3576 type);
3577 BUG_ON(1);
3578 }
3579
3580 /* we don't want a chunk larger than 10% of writeable space */
3581 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3582 max_chunk_size);
3583
3584 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3585 GFP_NOFS);
3586 if (!devices_info)
3587 return -ENOMEM;
3588
3589 cur = fs_devices->alloc_list.next;
3590
3591 /*
3592 * in the first pass through the devices list, we gather information
3593 * about the available holes on each device.
3594 */
3595 ndevs = 0;
3596 while (cur != &fs_devices->alloc_list) {
3597 struct btrfs_device *device;
3598 u64 max_avail;
3599 u64 dev_offset;
3600
3601 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3602
3603 cur = cur->next;
3604
3605 if (!device->writeable) {
3606 WARN(1, KERN_ERR
3607 "btrfs: read-only device in alloc_list\n");
3608 continue;
3609 }
3610
3611 if (!device->in_fs_metadata ||
3612 device->is_tgtdev_for_dev_replace)
3613 continue;
3614
3615 if (device->total_bytes > device->bytes_used)
3616 total_avail = device->total_bytes - device->bytes_used;
3617 else
3618 total_avail = 0;
3619
3620 /* If there is no space on this device, skip it. */
3621 if (total_avail == 0)
3622 continue;
3623
3624 ret = find_free_dev_extent(device,
3625 max_stripe_size * dev_stripes,
3626 &dev_offset, &max_avail);
3627 if (ret && ret != -ENOSPC)
3628 goto error;
3629
3630 if (ret == 0)
3631 max_avail = max_stripe_size * dev_stripes;
3632
3633 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3634 continue;
3635
3636 devices_info[ndevs].dev_offset = dev_offset;
3637 devices_info[ndevs].max_avail = max_avail;
3638 devices_info[ndevs].total_avail = total_avail;
3639 devices_info[ndevs].dev = device;
3640 ++ndevs;
3641 WARN_ON(ndevs > fs_devices->rw_devices);
3642 }
3643
3644 /*
3645 * now sort the devices by hole size / available space
3646 */
3647 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3648 btrfs_cmp_device_info, NULL);
3649
3650 /* round down to number of usable stripes */
3651 ndevs -= ndevs % devs_increment;
3652
3653 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3654 ret = -ENOSPC;
3655 goto error;
3656 }
3657
3658 if (devs_max && ndevs > devs_max)
3659 ndevs = devs_max;
3660 /*
3661 * the primary goal is to maximize the number of stripes, so use as many
3662 * devices as possible, even if the stripes are not maximum sized.
3663 */
3664 stripe_size = devices_info[ndevs-1].max_avail;
3665 num_stripes = ndevs * dev_stripes;
3666
3667 if (stripe_size * ndevs > max_chunk_size * ncopies) {
3668 stripe_size = max_chunk_size * ncopies;
3669 do_div(stripe_size, ndevs);
3670 }
3671
3672 do_div(stripe_size, dev_stripes);
3673
3674 /* align to BTRFS_STRIPE_LEN */
3675 do_div(stripe_size, BTRFS_STRIPE_LEN);
3676 stripe_size *= BTRFS_STRIPE_LEN;
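/*
 * Worked example (illustrative): data RAID10 over 4 devices with a
 * smallest hole of 100GiB is first capped at max_stripe_size (1GiB
 * for data), then reduced if stripe_size * ndevs would exceed
 * max_chunk_size * ncopies, and finally rounded down to a multiple
 * of BTRFS_STRIPE_LEN.
 */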
3677
3678 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3679 if (!map) {
3680 ret = -ENOMEM;
3681 goto error;
3682 }
3683 map->num_stripes = num_stripes;
3684
3685 for (i = 0; i < ndevs; ++i) {
3686 for (j = 0; j < dev_stripes; ++j) {
3687 int s = i * dev_stripes + j;
3688 map->stripes[s].dev = devices_info[i].dev;
3689 map->stripes[s].physical = devices_info[i].dev_offset +
3690 j * stripe_size;
3691 }
3692 }
3693 map->sector_size = extent_root->sectorsize;
3694 map->stripe_len = BTRFS_STRIPE_LEN;
3695 map->io_align = BTRFS_STRIPE_LEN;
3696 map->io_width = BTRFS_STRIPE_LEN;
3697 map->type = type;
3698 map->sub_stripes = sub_stripes;
3699
3700 *map_ret = map;
3701 num_bytes = stripe_size * (num_stripes / ncopies);
3702
3703 *stripe_size_out = stripe_size;
3704 *num_bytes_out = num_bytes;
3705
3706 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3707
3708 em = alloc_extent_map();
3709 if (!em) {
3710 ret = -ENOMEM;
3711 goto error;
3712 }
3713 em->bdev = (struct block_device *)map;
3714 em->start = start;
3715 em->len = num_bytes;
3716 em->block_start = 0;
3717 em->block_len = em->len;
3718
3719 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3720 write_lock(&em_tree->lock);
3721 ret = add_extent_mapping(em_tree, em);
3722 write_unlock(&em_tree->lock);
3723 free_extent_map(em);
3724 if (ret)
3725 goto error;
3726
3727 ret = btrfs_make_block_group(trans, extent_root, 0, type,
3728 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3729 start, num_bytes);
3730 if (ret)
3731 goto error;
3732
3733 for (i = 0; i < map->num_stripes; ++i) {
3734 struct btrfs_device *device;
3735 u64 dev_offset;
3736
3737 device = map->stripes[i].dev;
3738 dev_offset = map->stripes[i].physical;
3739
3740 ret = btrfs_alloc_dev_extent(trans, device,
3741 info->chunk_root->root_key.objectid,
3742 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3743 start, dev_offset, stripe_size);
3744 if (ret) {
3745 btrfs_abort_transaction(trans, extent_root, ret);
3746 goto error;
3747 }
3748 }
3749
3750 kfree(devices_info);
3751 return 0;
3752
3753 error:
3754 kfree(map);
3755 kfree(devices_info);
3756 return ret;
3757 }
3758
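/*
 * Second phase of chunk allocation: record per-device bytes_used,
 * write the stripes into a chunk item in the chunk tree and, for
 * SYSTEM chunks, mirror the item into the superblock's
 * sys_chunk_array.
 */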
3759 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3760 struct btrfs_root *extent_root,
3761 struct map_lookup *map, u64 chunk_offset,
3762 u64 chunk_size, u64 stripe_size)
3763 {
3764 u64 dev_offset;
3765 struct btrfs_key key;
3766 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3767 struct btrfs_device *device;
3768 struct btrfs_chunk *chunk;
3769 struct btrfs_stripe *stripe;
3770 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3771 int index = 0;
3772 int ret;
3773
3774 chunk = kzalloc(item_size, GFP_NOFS);
3775 if (!chunk)
3776 return -ENOMEM;
3777
3778 index = 0;
3779 while (index < map->num_stripes) {
3780 device = map->stripes[index].dev;
3781 device->bytes_used += stripe_size;
3782 ret = btrfs_update_device(trans, device);
3783 if (ret)
3784 goto out_free;
3785 index++;
3786 }
3787
3788 spin_lock(&extent_root->fs_info->free_chunk_lock);
3789 extent_root->fs_info->free_chunk_space -= (stripe_size *
3790 map->num_stripes);
3791 spin_unlock(&extent_root->fs_info->free_chunk_lock);
3792
3793 index = 0;
3794 stripe = &chunk->stripe;
3795 while (index < map->num_stripes) {
3796 device = map->stripes[index].dev;
3797 dev_offset = map->stripes[index].physical;
3798
3799 btrfs_set_stack_stripe_devid(stripe, device->devid);
3800 btrfs_set_stack_stripe_offset(stripe, dev_offset);
3801 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3802 stripe++;
3803 index++;
3804 }
3805
3806 btrfs_set_stack_chunk_length(chunk, chunk_size);
3807 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3808 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3809 btrfs_set_stack_chunk_type(chunk, map->type);
3810 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3811 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3812 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3813 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3814 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3815
3816 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3817 key.type = BTRFS_CHUNK_ITEM_KEY;
3818 key.offset = chunk_offset;
3819
3820 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3821
3822 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3823 /*
3824 * TODO: Cleanup of inserted chunk root in case of
3825 * failure.
3826 */
3827 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3828 item_size);
3829 }
3830
3831 out_free:
3832 kfree(chunk);
3833 return ret;
3834 }
3835
3836 /*
3837 * Chunk allocation falls into two parts. The first part does the work
3838 * that makes the newly allocated chunk usable, but does not do any
3839 * operation that modifies the chunk tree. The second part does the work
3840 * that requires modifying the chunk tree. This division is important for
3841 * the bootstrap process of adding storage to a seed btrfs.
3842 */
3843 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3844 struct btrfs_root *extent_root, u64 type)
3845 {
3846 u64 chunk_offset;
3847 u64 chunk_size;
3848 u64 stripe_size;
3849 struct map_lookup *map;
3850 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3851 int ret;
3852
3853 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3854 &chunk_offset);
3855 if (ret)
3856 return ret;
3857
3858 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3859 &stripe_size, chunk_offset, type);
3860 if (ret)
3861 return ret;
3862
3863 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3864 chunk_size, stripe_size);
3865 if (ret)
3866 return ret;
3867 return 0;
3868 }
3869
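/*
 * Bootstrap the first rw device of a sprouted filesystem: allocate a
 * metadata and a system chunk with __btrfs_alloc_chunk() first, add
 * the device item, and only then run the chunk-tree-modifying
 * __finish_chunk_alloc() steps for both chunks.
 */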
3870 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3871 struct btrfs_root *root,
3872 struct btrfs_device *device)
3873 {
3874 u64 chunk_offset;
3875 u64 sys_chunk_offset;
3876 u64 chunk_size;
3877 u64 sys_chunk_size;
3878 u64 stripe_size;
3879 u64 sys_stripe_size;
3880 u64 alloc_profile;
3881 struct map_lookup *map;
3882 struct map_lookup *sys_map;
3883 struct btrfs_fs_info *fs_info = root->fs_info;
3884 struct btrfs_root *extent_root = fs_info->extent_root;
3885 int ret;
3886
3887 ret = find_next_chunk(fs_info->chunk_root,
3888 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3889 if (ret)
3890 return ret;
3891
3892 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3893 fs_info->avail_metadata_alloc_bits;
3894 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3895
3896 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3897 &stripe_size, chunk_offset, alloc_profile);
3898 if (ret)
3899 return ret;
3900
3901 sys_chunk_offset = chunk_offset + chunk_size;
3902
3903 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3904 fs_info->avail_system_alloc_bits;
3905 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3906
3907 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3908 &sys_chunk_size, &sys_stripe_size,
3909 sys_chunk_offset, alloc_profile);
3910 if (ret) {
3911 btrfs_abort_transaction(trans, root, ret);
3912 goto out;
3913 }
3914
3915 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3916 if (ret) {
3917 btrfs_abort_transaction(trans, root, ret);
3918 goto out;
3919 }
3920
3921 /*
3922 * Modifying the chunk tree requires allocating new blocks from
3923 * both the system block group and the metadata block group, so
3924 * we can only perform operations that modify the chunk tree
3925 * after both block groups have been created.
3926 */
3927 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3928 chunk_size, stripe_size);
3929 if (ret) {
3930 btrfs_abort_transaction(trans, root, ret);
3931 goto out;
3932 }
3933
3934 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3935 sys_chunk_offset, sys_chunk_size,
3936 sys_stripe_size);
3937 if (ret)
3938 btrfs_abort_transaction(trans, root, ret);
3939
3940 out:
3941
3942 return ret;
3943 }
3944
3945 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3946 {
3947 struct extent_map *em;
3948 struct map_lookup *map;
3949 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3950 int readonly = 0;
3951 int i;
3952
3953 read_lock(&map_tree->map_tree.lock);
3954 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3955 read_unlock(&map_tree->map_tree.lock);
3956 if (!em)
3957 return 1;
3958
3959 if (btrfs_test_opt(root, DEGRADED)) {
3960 free_extent_map(em);
3961 return 0;
3962 }
3963
3964 map = (struct map_lookup *)em->bdev;
3965 for (i = 0; i < map->num_stripes; i++) {
3966 if (!map->stripes[i].dev->writeable) {
3967 readonly = 1;
3968 break;
3969 }
3970 }
3971 free_extent_map(em);
3972 return readonly;
3973 }
3974
3975 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3976 {
3977 extent_map_tree_init(&tree->map_tree);
3978 }
3979
3980 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3981 {
3982 struct extent_map *em;
3983
3984 while (1) {
3985 write_lock(&tree->map_tree.lock);
3986 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3987 if (em)
3988 remove_extent_mapping(&tree->map_tree, em);
3989 write_unlock(&tree->map_tree.lock);
3990 if (!em)
3991 break;
3992 kfree(em->bdev);
3993 /* once for us */
3994 free_extent_map(em);
3995 /* once for the tree */
3996 free_extent_map(em);
3997 }
3998 }
3999
4000 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
4001 {
4002 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4003 struct extent_map *em;
4004 struct map_lookup *map;
4005 struct extent_map_tree *em_tree = &map_tree->map_tree;
4006 int ret;
4007
4008 read_lock(&em_tree->lock);
4009 em = lookup_extent_mapping(em_tree, logical, len);
4010 read_unlock(&em_tree->lock);
4011 BUG_ON(!em);
4012
4013 BUG_ON(em->start > logical || em->start + em->len < logical);
4014 map = (struct map_lookup *)em->bdev;
4015 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4016 ret = map->num_stripes;
4017 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4018 ret = map->sub_stripes;
4019 else
4020 ret = 1;
4021 free_extent_map(em);
4022
4023 btrfs_dev_replace_lock(&fs_info->dev_replace);
4024 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4025 ret++;
4026 btrfs_dev_replace_unlock(&fs_info->dev_replace);
4027
4028 return ret;
4029 }
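/*
 * Editor's note: an illustrative summary of btrfs_num_copies() (added for
 * clarity, not part of the original file). For a RAID1 or DUP chunk it
 * returns num_stripes (typically 2); for RAID10 it returns sub_stripes
 * (typically 2); for single and RAID0 it returns 1. While a dev-replace
 * is running, one more copy is reported because the target drive mirrors
 * the source drive left of the cursor, e.g. RAID1 + ongoing replace => 3.
 */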
4030
4031 static int find_live_mirror(struct btrfs_fs_info *fs_info,
4032 struct map_lookup *map, int first, int num,
4033 int optimal, int dev_replace_is_ongoing)
4034 {
4035 int i;
4036 int tolerance;
4037 struct btrfs_device *srcdev;
4038
4039 if (dev_replace_is_ongoing &&
4040 fs_info->dev_replace.cont_reading_from_srcdev_mode ==
4041 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
4042 srcdev = fs_info->dev_replace.srcdev;
4043 else
4044 srcdev = NULL;
4045
4046 /*
4047 * try to avoid the drive that is the source drive for a
4048 * dev-replace procedure, only choose it if no other non-missing
4049 * mirror is available
4050 */
4051 for (tolerance = 0; tolerance < 2; tolerance++) {
4052 if (map->stripes[optimal].dev->bdev &&
4053 (tolerance || map->stripes[optimal].dev != srcdev))
4054 return optimal;
4055 for (i = first; i < first + num; i++) {
4056 if (map->stripes[i].dev->bdev &&
4057 (tolerance || map->stripes[i].dev != srcdev))
4058 return i;
4059 }
4060 }
4061
4062 /* we couldn't find one that doesn't fail. Just return something
4063 * and the io error handling code will clean up eventually
4064 */
4065 return optimal;
4066 }
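/*
 * Editor's note: a worked example of the tolerance loop above (added for
 * clarity, not in the original). Suppose optimal == 1 and
 * map->stripes[1].dev is the dev-replace source drive while
 * map->stripes[0] is healthy: pass 0 (tolerance == 0) rejects stripe 1
 * as srcdev and returns index 0 instead. Only if every other mirror is
 * missing does pass 1 (tolerance == 1) fall back to the source drive.
 */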
4067
4068 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4069 u64 logical, u64 *length,
4070 struct btrfs_bio **bbio_ret,
4071 int mirror_num)
4072 {
4073 struct extent_map *em;
4074 struct map_lookup *map;
4075 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4076 struct extent_map_tree *em_tree = &map_tree->map_tree;
4077 u64 offset;
4078 u64 stripe_offset;
4079 u64 stripe_end_offset;
4080 u64 stripe_nr;
4081 u64 stripe_nr_orig;
4082 u64 stripe_nr_end;
4083 int stripe_index;
4084 int i;
4085 int ret = 0;
4086 int num_stripes;
4087 int max_errors = 0;
4088 struct btrfs_bio *bbio = NULL;
4089 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
4090 int dev_replace_is_ongoing = 0;
4091 int num_alloc_stripes;
4092 int patch_the_first_stripe_for_dev_replace = 0;
4093 u64 physical_to_patch_in_first_stripe = 0;
4094
4095 read_lock(&em_tree->lock);
4096 em = lookup_extent_mapping(em_tree, logical, *length);
4097 read_unlock(&em_tree->lock);
4098
4099 if (!em) {
4100 printk(KERN_CRIT "btrfs: unable to find logical %llu len %llu\n",
4101 (unsigned long long)logical,
4102 (unsigned long long)*length);
4103 BUG();
4104 }
4105
4106 BUG_ON(em->start > logical || em->start + em->len < logical);
4107 map = (struct map_lookup *)em->bdev;
4108 offset = logical - em->start;
4109
4110 stripe_nr = offset;
4111 /*
4112 * stripe_nr counts the total number of stripes we have to stride
4113 * to get to this block
4114 */
4115 do_div(stripe_nr, map->stripe_len);
4116
4117 stripe_offset = stripe_nr * map->stripe_len;
4118 BUG_ON(offset < stripe_offset);
4119
4120 /* stripe_offset is the offset of this block in its stripe */
4121 stripe_offset = offset - stripe_offset;
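	/*
	 * Editor's note: a worked example of the stripe arithmetic above
	 * (added for clarity). With stripe_len == 64K and offset == 200K:
	 * stripe_nr = 200K / 64K = 3 (do_div truncates), the start of that
	 * stripe is 3 * 64K = 192K, and stripe_offset = 200K - 192K = 8K,
	 * i.e. the block begins 8K into its stripe.
	 */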
4122
4123 if (rw & REQ_DISCARD)
4124 *length = min_t(u64, em->len - offset, *length);
4125 else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
4126 /* we limit the length of each bio to what fits in a stripe */
4127 *length = min_t(u64, em->len - offset,
4128 map->stripe_len - stripe_offset);
4129 } else {
4130 *length = em->len - offset;
4131 }
4132
4133 if (!bbio_ret)
4134 goto out;
4135
4136 btrfs_dev_replace_lock(dev_replace);
4137 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
4138 if (!dev_replace_is_ongoing)
4139 btrfs_dev_replace_unlock(dev_replace);
4140
4141 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
4142 !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
4143 dev_replace->tgtdev != NULL) {
4144 /*
4145 * In the dev-replace case, for the repair case (the only
4146 * case where the mirror is selected explicitly when
4147 * calling btrfs_map_block), blocks left of the left cursor
4148 * can also be read from the target drive.
4149 * For REQ_GET_READ_MIRRORS, the target drive is added as
4150 * the last entry in the array of stripes. For READ, it also
4151 * needs to be addressable using the same mirror number.
4152 * If the requested block is not left of the left cursor,
4153 * EIO is returned. This can happen because btrfs_num_copies()
4154 * returns one more in the dev-replace case.
4155 */
4156 u64 tmp_length = *length;
4157 struct btrfs_bio *tmp_bbio = NULL;
4158 int tmp_num_stripes;
4159 u64 srcdev_devid = dev_replace->srcdev->devid;
4160 int index_srcdev = 0;
4161 int found = 0;
4162 u64 physical_of_found = 0;
4163
4164 ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
4165 logical, &tmp_length, &tmp_bbio, 0);
4166 if (ret) {
4167 WARN_ON(tmp_bbio != NULL);
4168 goto out;
4169 }
4170
4171 tmp_num_stripes = tmp_bbio->num_stripes;
4172 if (mirror_num > tmp_num_stripes) {
4173 /*
4174 * REQ_GET_READ_MIRRORS does not contain this
4175 * mirror, that means that the requested area
4176 * is not left of the left cursor
4177 */
4178 ret = -EIO;
4179 kfree(tmp_bbio);
4180 goto out;
4181 }
4182
4183 /*
4184 * Process the rest of the function using the mirror_num
4185 * of the source drive, so look it up first.
4186 * At the end, patch the device pointer to point to the
4187 * target drive.
4188 */
4189 for (i = 0; i < tmp_num_stripes; i++) {
4190 if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
4191 /*
4192 * In case of DUP, in order to keep it
4193 * simple, only add the mirror with the
4194 * lowest physical address
4195 */
4196 if (found &&
4197 physical_of_found <=
4198 tmp_bbio->stripes[i].physical)
4199 continue;
4200 index_srcdev = i;
4201 found = 1;
4202 physical_of_found =
4203 tmp_bbio->stripes[i].physical;
4204 }
4205 }
4206
4207 if (found) {
4208 mirror_num = index_srcdev + 1;
4209 patch_the_first_stripe_for_dev_replace = 1;
4210 physical_to_patch_in_first_stripe = physical_of_found;
4211 } else {
4212 WARN_ON(1);
4213 ret = -EIO;
4214 kfree(tmp_bbio);
4215 goto out;
4216 }
4217
4218 kfree(tmp_bbio);
4219 } else if (mirror_num > map->num_stripes) {
4220 mirror_num = 0;
4221 }
4222
4223 num_stripes = 1;
4224 stripe_index = 0;
4225 stripe_nr_orig = stripe_nr;
4226 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
4227 (~(map->stripe_len - 1));
4228 do_div(stripe_nr_end, map->stripe_len);
4229 stripe_end_offset = stripe_nr_end * map->stripe_len -
4230 (offset + *length);
4231 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4232 if (rw & REQ_DISCARD)
4233 num_stripes = min_t(u64, map->num_stripes,
4234 stripe_nr_end - stripe_nr_orig);
4235 stripe_index = do_div(stripe_nr, map->num_stripes);
4236 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
4237 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
4238 num_stripes = map->num_stripes;
4239 else if (mirror_num)
4240 stripe_index = mirror_num - 1;
4241 else {
4242 stripe_index = find_live_mirror(fs_info, map, 0,
4243 map->num_stripes,
4244 current->pid % map->num_stripes,
4245 dev_replace_is_ongoing);
4246 mirror_num = stripe_index + 1;
4247 }
4248
4249 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
4250 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
4251 num_stripes = map->num_stripes;
4252 } else if (mirror_num) {
4253 stripe_index = mirror_num - 1;
4254 } else {
4255 mirror_num = 1;
4256 }
4257
4258 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4259 int factor = map->num_stripes / map->sub_stripes;
4260
4261 stripe_index = do_div(stripe_nr, factor);
4262 stripe_index *= map->sub_stripes;
4263
4264 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
4265 num_stripes = map->sub_stripes;
4266 else if (rw & REQ_DISCARD)
4267 num_stripes = min_t(u64, map->sub_stripes *
4268 (stripe_nr_end - stripe_nr_orig),
4269 map->num_stripes);
4270 else if (mirror_num)
4271 stripe_index += mirror_num - 1;
4272 else {
4273 int old_stripe_index = stripe_index;
4274 stripe_index = find_live_mirror(fs_info, map,
4275 stripe_index,
4276 map->sub_stripes, stripe_index +
4277 current->pid % map->sub_stripes,
4278 dev_replace_is_ongoing);
4279 mirror_num = stripe_index - old_stripe_index + 1;
4280 }
4281 } else {
4282 /*
4283 * after this do_div call, stripe_nr is the number of stripes
4284 * on this device we have to walk to find the data, and
4285 * stripe_index is the number of our device in the stripe array
4286 */
4287 stripe_index = do_div(stripe_nr, map->num_stripes);
4288 mirror_num = stripe_index + 1;
4289 }
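	/*
	 * Editor's note: a worked RAID10 example for the branch above
	 * (added for clarity). With num_stripes == 4 and sub_stripes == 2,
	 * factor == 2; for stripe_nr == 5, do_div leaves stripe_nr == 2 and
	 * returns stripe_index == 1, which is then scaled to 2, the first
	 * stripe of the second mirror pair. A plain read then picks one of
	 * the two copies at indices 2 and 3 via find_live_mirror().
	 */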
4290 BUG_ON(stripe_index >= map->num_stripes);
4291
4292 num_alloc_stripes = num_stripes;
4293 if (dev_replace_is_ongoing) {
4294 if (rw & (REQ_WRITE | REQ_DISCARD))
4295 num_alloc_stripes <<= 1;
4296 if (rw & REQ_GET_READ_MIRRORS)
4297 num_alloc_stripes++;
4298 }
4299 bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
4300 if (!bbio) {
4301 ret = -ENOMEM;
4302 goto out;
4303 }
4304 atomic_set(&bbio->error, 0);
4305
4306 if (rw & REQ_DISCARD) {
4307 int factor = 0;
4308 int sub_stripes = 0;
4309 u64 stripes_per_dev = 0;
4310 u32 remaining_stripes = 0;
4311 u32 last_stripe = 0;
4312
4313 if (map->type &
4314 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
4315 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4316 sub_stripes = 1;
4317 else
4318 sub_stripes = map->sub_stripes;
4319
4320 factor = map->num_stripes / sub_stripes;
4321 stripes_per_dev = div_u64_rem(stripe_nr_end -
4322 stripe_nr_orig,
4323 factor,
4324 &remaining_stripes);
4325 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
4326 last_stripe *= sub_stripes;
4327 }
4328
4329 for (i = 0; i < num_stripes; i++) {
4330 bbio->stripes[i].physical =
4331 map->stripes[stripe_index].physical +
4332 stripe_offset + stripe_nr * map->stripe_len;
4333 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
4334
4335 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
4336 BTRFS_BLOCK_GROUP_RAID10)) {
4337 bbio->stripes[i].length = stripes_per_dev *
4338 map->stripe_len;
4339
4340 if (i / sub_stripes < remaining_stripes)
4341 bbio->stripes[i].length +=
4342 map->stripe_len;
4343
4344 /*
4345 * Special for the first stripe and
4346 * the last stripe:
4347 *
4348 * |-------|...|-------|
4349 * |----------|
4350 * off end_off
4351 */
4352 if (i < sub_stripes)
4353 bbio->stripes[i].length -=
4354 stripe_offset;
4355
4356 if (stripe_index >= last_stripe &&
4357 stripe_index <= (last_stripe +
4358 sub_stripes - 1))
4359 bbio->stripes[i].length -=
4360 stripe_end_offset;
4361
4362 if (i == sub_stripes - 1)
4363 stripe_offset = 0;
4364 } else
4365 bbio->stripes[i].length = *length;
4366
4367 stripe_index++;
4368 if (stripe_index == map->num_stripes) {
4369 /* This could only happen for RAID0/10 */
4370 stripe_index = 0;
4371 stripe_nr++;
4372 }
4373 }
4374 } else {
4375 for (i = 0; i < num_stripes; i++) {
4376 bbio->stripes[i].physical =
4377 map->stripes[stripe_index].physical +
4378 stripe_offset +
4379 stripe_nr * map->stripe_len;
4380 bbio->stripes[i].dev =
4381 map->stripes[stripe_index].dev;
4382 stripe_index++;
4383 }
4384 }
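	/*
	 * Editor's note: a worked example of the discard trimming above
	 * (added for clarity). On RAID0 with 2 stripes, stripe_len == 64K,
	 * stripe_offset == 8K and stripe_end_offset == 16K, each stripe
	 * first gets stripes_per_dev * 64K (+64K for devices that cover a
	 * remaining stripe); the first sub_stripes entries then drop the
	 * leading 8K and the entries covering the last stripe drop the
	 * trailing 16K, matching the off/end_off diagram above.
	 */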
4385
4386 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
4387 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4388 BTRFS_BLOCK_GROUP_RAID10 |
4389 BTRFS_BLOCK_GROUP_DUP)) {
4390 max_errors = 1;
4391 }
4392 }
4393
4394 if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
4395 dev_replace->tgtdev != NULL) {
4396 int index_where_to_add;
4397 u64 srcdev_devid = dev_replace->srcdev->devid;
4398
4399 /*
4400 * duplicate the write operations while the dev replace
4401 * procedure is running. Since the copying of the old disk
4402 * to the new disk takes place at run time while the
4403 * filesystem is mounted writable, the regular write
4404 * operations to the old disk have to be duplicated to go
4405 * to the new disk as well.
4406 * Note that device->missing is handled by the caller, and
4407 * that the write to the old disk is already set up in the
4408 * stripes array.
4409 */
4410 index_where_to_add = num_stripes;
4411 for (i = 0; i < num_stripes; i++) {
4412 if (bbio->stripes[i].dev->devid == srcdev_devid) {
4413 /* write to new disk, too */
4414 struct btrfs_bio_stripe *new =
4415 bbio->stripes + index_where_to_add;
4416 struct btrfs_bio_stripe *old =
4417 bbio->stripes + i;
4418
4419 new->physical = old->physical;
4420 new->length = old->length;
4421 new->dev = dev_replace->tgtdev;
4422 index_where_to_add++;
4423 max_errors++;
4424 }
4425 }
4426 num_stripes = index_where_to_add;
4427 } else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
4428 dev_replace->tgtdev != NULL) {
4429 u64 srcdev_devid = dev_replace->srcdev->devid;
4430 int index_srcdev = 0;
4431 int found = 0;
4432 u64 physical_of_found = 0;
4433
4434 /*
4435 * During the dev-replace procedure, the target drive can
4436 * also be used to read data in case it is needed to repair
4437 * a corrupt block elsewhere. This is possible if the
4438 * requested area is left of the left cursor. In this area,
4439 * the target drive is a full copy of the source drive.
4440 */
4441 for (i = 0; i < num_stripes; i++) {
4442 if (bbio->stripes[i].dev->devid == srcdev_devid) {
4443 /*
4444 * In case of DUP, in order to keep it
4445 * simple, only add the mirror with the
4446 * lowest physical address
4447 */
4448 if (found &&
4449 physical_of_found <=
4450 bbio->stripes[i].physical)
4451 continue;
4452 index_srcdev = i;
4453 found = 1;
4454 physical_of_found = bbio->stripes[i].physical;
4455 }
4456 }
4457 if (found) {
4458 u64 length = map->stripe_len;
4459
4460 if (physical_of_found + length <=
4461 dev_replace->cursor_left) {
4462 struct btrfs_bio_stripe *tgtdev_stripe =
4463 bbio->stripes + num_stripes;
4464
4465 tgtdev_stripe->physical = physical_of_found;
4466 tgtdev_stripe->length =
4467 bbio->stripes[index_srcdev].length;
4468 tgtdev_stripe->dev = dev_replace->tgtdev;
4469
4470 num_stripes++;
4471 }
4472 }
4473 }
4474
4475 *bbio_ret = bbio;
4476 bbio->num_stripes = num_stripes;
4477 bbio->max_errors = max_errors;
4478 bbio->mirror_num = mirror_num;
4479
4480 /*
4481 * This is the case where REQ_READ && dev_replace_is_ongoing &&
4482 * mirror_num == num_stripes + 1 && the dev-replace target
4483 * drive is available as a mirror
4484 */
4485 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
4486 WARN_ON(num_stripes > 1);
4487 bbio->stripes[0].dev = dev_replace->tgtdev;
4488 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
4489 bbio->mirror_num = map->num_stripes + 1;
4490 }
4491 out:
4492 if (dev_replace_is_ongoing)
4493 btrfs_dev_replace_unlock(dev_replace);
4494 free_extent_map(em);
4495 return ret;
4496 }
4497
4498 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4499 u64 logical, u64 *length,
4500 struct btrfs_bio **bbio_ret, int mirror_num)
4501 {
4502 return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
4503 mirror_num);
4504 }
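/*
 * Editor's note: a hedged sketch of a typical btrfs_map_block() caller
 * (illustrative only; the local variable names are hypothetical). The
 * caller passes in the length it wants, gets back the possibly shortened
 * mapped length plus a btrfs_bio describing the physical stripes, and
 * must free the btrfs_bio with kfree() when done:
 *
 *	u64 map_length = len;
 *	struct btrfs_bio *bbio = NULL;
 *
 *	ret = btrfs_map_block(fs_info, READ, logical, &map_length,
 *			      &bbio, 0);
 *	if (!ret) {
 *		... submit to bbio->stripes[0 .. bbio->num_stripes - 1] ...
 *		kfree(bbio);
 *	}
 */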
4505
4506 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
4507 u64 chunk_start, u64 physical, u64 devid,
4508 u64 **logical, int *naddrs, int *stripe_len)
4509 {
4510 struct extent_map_tree *em_tree = &map_tree->map_tree;
4511 struct extent_map *em;
4512 struct map_lookup *map;
4513 u64 *buf;
4514 u64 bytenr;
4515 u64 length;
4516 u64 stripe_nr;
4517 int i, j, nr = 0;
4518
4519 read_lock(&em_tree->lock);
4520 em = lookup_extent_mapping(em_tree, chunk_start, 1);
4521 read_unlock(&em_tree->lock);
4522
4523 BUG_ON(!em || em->start != chunk_start);
4524 map = (struct map_lookup *)em->bdev;
4525
4526 length = em->len;
4527 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4528 do_div(length, map->num_stripes / map->sub_stripes);
4529 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4530 do_div(length, map->num_stripes);
4531
4532 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
4533 BUG_ON(!buf); /* -ENOMEM */
4534
4535 for (i = 0; i < map->num_stripes; i++) {
4536 if (devid && map->stripes[i].dev->devid != devid)
4537 continue;
4538 if (map->stripes[i].physical > physical ||
4539 map->stripes[i].physical + length <= physical)
4540 continue;
4541
4542 stripe_nr = physical - map->stripes[i].physical;
4543 do_div(stripe_nr, map->stripe_len);
4544
4545 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4546 stripe_nr = stripe_nr * map->num_stripes + i;
4547 do_div(stripe_nr, map->sub_stripes);
4548 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4549 stripe_nr = stripe_nr * map->num_stripes + i;
4550 }
4551 bytenr = chunk_start + stripe_nr * map->stripe_len;
4552 WARN_ON(nr >= map->num_stripes);
4553 for (j = 0; j < nr; j++) {
4554 if (buf[j] == bytenr)
4555 break;
4556 }
4557 if (j == nr) {
4558 WARN_ON(nr >= map->num_stripes);
4559 buf[nr++] = bytenr;
4560 }
4561 }
4562
4563 *logical = buf;
4564 *naddrs = nr;
4565 *stripe_len = map->stripe_len;
4566
4567 free_extent_map(em);
4568 return 0;
4569 }
4570
4571 static void *merge_stripe_index_into_bio_private(void *bi_private,
4572 unsigned int stripe_index)
4573 {
4574 /*
4575 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
4576 * at most 1.
4577 * The alternative solution (instead of stealing bits from the
4578 * pointer) would be to allocate an intermediate structure
4579 * that contains the old private pointer plus the stripe_index.
4580 */
4581 BUG_ON((((uintptr_t)bi_private) & 3) != 0);
4582 BUG_ON(stripe_index > 3);
4583 return (void *)(((uintptr_t)bi_private) | stripe_index);
4584 }
4585
4586 static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
4587 {
4588 return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
4589 }
4590
4591 static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
4592 {
4593 return (unsigned int)((uintptr_t)bi_private) & 3;
4594 }
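/*
 * Editor's note: an illustrative round trip through the three helpers
 * above (added for clarity). Because kmalloc'd btrfs_bio pointers are at
 * least 4-byte aligned, the low two bits are free to carry a stripe
 * index from 0 to 3:
 *
 *	void *p = merge_stripe_index_into_bio_private(bbio, 2);
 *	BUG_ON(extract_bbio_from_bio_private(p) != bbio);
 *	BUG_ON(extract_stripe_index_from_bio_private(p) != 2);
 */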
4595
4596 static void btrfs_end_bio(struct bio *bio, int err)
4597 {
4598 struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
4599 int is_orig_bio = 0;
4600
4601 if (err) {
4602 atomic_inc(&bbio->error);
4603 if (err == -EIO || err == -EREMOTEIO) {
4604 unsigned int stripe_index =
4605 extract_stripe_index_from_bio_private(
4606 bio->bi_private);
4607 struct btrfs_device *dev;
4608
4609 BUG_ON(stripe_index >= bbio->num_stripes);
4610 dev = bbio->stripes[stripe_index].dev;
4611 if (dev->bdev) {
4612 if (bio->bi_rw & WRITE)
4613 btrfs_dev_stat_inc(dev,
4614 BTRFS_DEV_STAT_WRITE_ERRS);
4615 else
4616 btrfs_dev_stat_inc(dev,
4617 BTRFS_DEV_STAT_READ_ERRS);
4618 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
4619 btrfs_dev_stat_inc(dev,
4620 BTRFS_DEV_STAT_FLUSH_ERRS);
4621 btrfs_dev_stat_print_on_error(dev);
4622 }
4623 }
4624 }
4625
4626 if (bio == bbio->orig_bio)
4627 is_orig_bio = 1;
4628
4629 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4630 if (!is_orig_bio) {
4631 bio_put(bio);
4632 bio = bbio->orig_bio;
4633 }
4634 bio->bi_private = bbio->private;
4635 bio->bi_end_io = bbio->end_io;
4636 bio->bi_bdev = (struct block_device *)
4637 (unsigned long)bbio->mirror_num;
4638 /* only send an error to the higher layers if it is
4639 * beyond the tolerance of the multi-bio
4640 */
4641 if (atomic_read(&bbio->error) > bbio->max_errors) {
4642 err = -EIO;
4643 } else {
4644 /*
4645 * this bio is actually up to date, we didn't
4646 * go over the max number of errors
4647 */
4648 set_bit(BIO_UPTODATE, &bio->bi_flags);
4649 err = 0;
4650 }
4651 kfree(bbio);
4652
4653 bio_endio(bio, err);
4654 } else if (!is_orig_bio) {
4655 bio_put(bio);
4656 }
4657 }
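/*
 * Editor's note: the bi_bdev overload above deserves a pointer (added
 * for clarity). Once the last stripe completes, bi_bdev no longer holds
 * a block device; it smuggles the mirror number back to the original
 * end_io handler, which is expected to recover it along the lines of:
 *
 *	int mirror = (int)(unsigned long)bio->bi_bdev;
 */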
4658
4659 struct async_sched {
4660 struct bio *bio;
4661 int rw;
4662 struct btrfs_fs_info *info;
4663 struct btrfs_work work;
4664 };
4665
4666 /*
4667 * see run_scheduled_bios for a description of why bios are collected for
4668 * async submit.
4669 *
4670 * This will add one bio to the pending list for a device and make sure
4671 * the work struct is scheduled.
4672 */
4673 static noinline void schedule_bio(struct btrfs_root *root,
4674 struct btrfs_device *device,
4675 int rw, struct bio *bio)
4676 {
4677 int should_queue = 1;
4678 struct btrfs_pending_bios *pending_bios;
4679
4680 /* don't bother with additional async steps for reads, right now */
4681 if (!(rw & REQ_WRITE)) {
4682 bio_get(bio);
4683 btrfsic_submit_bio(rw, bio);
4684 bio_put(bio);
4685 return;
4686 }
4687
4688 /*
4689 * nr_async_bios allows us to reliably return congestion to the
4690 * higher layers. Otherwise, the async bio makes it appear we have
4691 * made progress against dirty pages when we've really just put it
4692 * on a queue for later
4693 */
4694 atomic_inc(&root->fs_info->nr_async_bios);
4695 WARN_ON(bio->bi_next);
4696 bio->bi_next = NULL;
4697 bio->bi_rw |= rw;
4698
4699 spin_lock(&device->io_lock);
4700 if (bio->bi_rw & REQ_SYNC)
4701 pending_bios = &device->pending_sync_bios;
4702 else
4703 pending_bios = &device->pending_bios;
4704
4705 if (pending_bios->tail)
4706 pending_bios->tail->bi_next = bio;
4707
4708 pending_bios->tail = bio;
4709 if (!pending_bios->head)
4710 pending_bios->head = bio;
4711 if (device->running_pending)
4712 should_queue = 0;
4713
4714 spin_unlock(&device->io_lock);
4715
4716 if (should_queue)
4717 btrfs_queue_worker(&root->fs_info->submit_workers,
4718 &device->work);
4719 }
4720
4721 static int bio_size_ok(struct block_device *bdev, struct bio *bio,
4722 sector_t sector)
4723 {
4724 struct bio_vec *prev;
4725 struct request_queue *q = bdev_get_queue(bdev);
4726 unsigned short max_sectors = queue_max_sectors(q);
4727 struct bvec_merge_data bvm = {
4728 .bi_bdev = bdev,
4729 .bi_sector = sector,
4730 .bi_rw = bio->bi_rw,
4731 };
4732
4733 if (bio->bi_vcnt == 0) {
4734 WARN_ON(1);
4735 return 1;
4736 }
4737
4738 prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
4739 if ((bio->bi_size >> 9) > max_sectors)
4740 return 0;
4741
4742 if (!q->merge_bvec_fn)
4743 return 1;
4744
4745 bvm.bi_size = bio->bi_size - prev->bv_len;
4746 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
4747 return 0;
4748 return 1;
4749 }
4750
4751 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
4752 struct bio *bio, u64 physical, int dev_nr,
4753 int rw, int async)
4754 {
4755 struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
4756
4757 bio->bi_private = bbio;
4758 bio->bi_private = merge_stripe_index_into_bio_private(
4759 bio->bi_private, (unsigned int)dev_nr);
4760 bio->bi_end_io = btrfs_end_bio;
4761 bio->bi_sector = physical >> 9;
4762 #ifdef DEBUG
4763 {
4764 struct rcu_string *name;
4765
4766 rcu_read_lock();
4767 name = rcu_dereference(dev->name);
4768 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
4769 "(%s id %llu), size=%u\n", rw,
4770 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4771 name->str, dev->devid, bio->bi_size);
4772 rcu_read_unlock();
4773 }
4774 #endif
4775 bio->bi_bdev = dev->bdev;
4776 if (async)
4777 schedule_bio(root, dev, rw, bio);
4778 else
4779 btrfsic_submit_bio(rw, bio);
4780 }
4781
4782 static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
4783 struct bio *first_bio, struct btrfs_device *dev,
4784 int dev_nr, int rw, int async)
4785 {
4786 struct bio_vec *bvec = first_bio->bi_io_vec;
4787 struct bio *bio;
4788 int nr_vecs = bio_get_nr_vecs(dev->bdev);
4789 u64 physical = bbio->stripes[dev_nr].physical;
4790
4791 again:
4792 bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
4793 if (!bio)
4794 return -ENOMEM;
4795
4796 while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
4797 if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
4798 bvec->bv_offset) < bvec->bv_len) {
4799 u64 len = bio->bi_size;
4800
4801 atomic_inc(&bbio->stripes_pending);
4802 submit_stripe_bio(root, bbio, bio, physical, dev_nr,
4803 rw, async);
4804 physical += len;
4805 goto again;
4806 }
4807 bvec++;
4808 }
4809
4810 submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
4811 return 0;
4812 }
4813
4814 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
4815 {
4816 atomic_inc(&bbio->error);
4817 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4818 bio->bi_private = bbio->private;
4819 bio->bi_end_io = bbio->end_io;
4820 bio->bi_bdev = (struct block_device *)
4821 (unsigned long)bbio->mirror_num;
4822 bio->bi_sector = logical >> 9;
4823 kfree(bbio);
4824 bio_endio(bio, -EIO);
4825 }
4826 }
4827
4828 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4829 int mirror_num, int async_submit)
4830 {
4831 struct btrfs_device *dev;
4832 struct bio *first_bio = bio;
4833 u64 logical = (u64)bio->bi_sector << 9;
4834 u64 length = 0;
4835 u64 map_length;
4836 int ret;
4837 int dev_nr = 0;
4838 int total_devs = 1;
4839 struct btrfs_bio *bbio = NULL;
4840
4841 length = bio->bi_size;
4842 map_length = length;
4843
4844 ret = btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
4845 mirror_num);
4846 if (ret)
4847 return ret;
4848
4849 total_devs = bbio->num_stripes;
4850 if (map_length < length) {
4851 printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len %llu "
4852 "len %llu\n", (unsigned long long)logical,
4853 (unsigned long long)length,
4854 (unsigned long long)map_length);
4855 BUG();
4856 }
4857
4858 bbio->orig_bio = first_bio;
4859 bbio->private = first_bio->bi_private;
4860 bbio->end_io = first_bio->bi_end_io;
4861 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
4862
4863 while (dev_nr < total_devs) {
4864 dev = bbio->stripes[dev_nr].dev;
4865 if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
4866 bbio_error(bbio, first_bio, logical);
4867 dev_nr++;
4868 continue;
4869 }
4870
4871 /*
4872 * Check and see if we're ok with this bio based on its size
4873 * and offset with the given device.
4874 */
4875 if (!bio_size_ok(dev->bdev, first_bio,
4876 bbio->stripes[dev_nr].physical >> 9)) {
4877 ret = breakup_stripe_bio(root, bbio, first_bio, dev,
4878 dev_nr, rw, async_submit);
4879 BUG_ON(ret);
4880 dev_nr++;
4881 continue;
4882 }
4883
4884 if (dev_nr < total_devs - 1) {
4885 bio = bio_clone(first_bio, GFP_NOFS);
4886 BUG_ON(!bio); /* -ENOMEM */
4887 } else {
4888 bio = first_bio;
4889 }
4890
4891 submit_stripe_bio(root, bbio, bio,
4892 bbio->stripes[dev_nr].physical, dev_nr, rw,
4893 async_submit);
4894 dev_nr++;
4895 }
4896 return 0;
4897 }
4898
4899 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
4900 u8 *uuid, u8 *fsid)
4901 {
4902 struct btrfs_device *device;
4903 struct btrfs_fs_devices *cur_devices;
4904
4905 cur_devices = fs_info->fs_devices;
4906 while (cur_devices) {
4907 if (!fsid ||
4908 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4909 device = __find_device(&cur_devices->devices,
4910 devid, uuid);
4911 if (device)
4912 return device;
4913 }
4914 cur_devices = cur_devices->seed;
4915 }
4916 return NULL;
4917 }
4918
4919 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
4920 u64 devid, u8 *dev_uuid)
4921 {
4922 struct btrfs_device *device;
4923 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4924
4925 device = kzalloc(sizeof(*device), GFP_NOFS);
4926 if (!device)
4927 return NULL;
4928 list_add(&device->dev_list,
4929 &fs_devices->devices);
4930 device->dev_root = root->fs_info->dev_root;
4931 device->devid = devid;
4932 device->work.func = pending_bios_fn;
4933 device->fs_devices = fs_devices;
4934 device->missing = 1;
4935 fs_devices->num_devices++;
4936 fs_devices->missing_devices++;
4937 spin_lock_init(&device->io_lock);
4938 INIT_LIST_HEAD(&device->dev_alloc_list);
4939 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
4940 return device;
4941 }
4942
4943 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4944 struct extent_buffer *leaf,
4945 struct btrfs_chunk *chunk)
4946 {
4947 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4948 struct map_lookup *map;
4949 struct extent_map *em;
4950 u64 logical;
4951 u64 length;
4952 u64 devid;
4953 u8 uuid[BTRFS_UUID_SIZE];
4954 int num_stripes;
4955 int ret;
4956 int i;
4957
4958 logical = key->offset;
4959 length = btrfs_chunk_length(leaf, chunk);
4960
4961 read_lock(&map_tree->map_tree.lock);
4962 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
4963 read_unlock(&map_tree->map_tree.lock);
4964
4965 /* already mapped? */
4966 if (em && em->start <= logical && em->start + em->len > logical) {
4967 free_extent_map(em);
4968 return 0;
4969 } else if (em) {
4970 free_extent_map(em);
4971 }
4972
4973 em = alloc_extent_map();
4974 if (!em)
4975 return -ENOMEM;
4976 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4977 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4978 if (!map) {
4979 free_extent_map(em);
4980 return -ENOMEM;
4981 }
4982
4983 em->bdev = (struct block_device *)map;
4984 em->start = logical;
4985 em->len = length;
4986 em->block_start = 0;
4987 em->block_len = em->len;
4988
4989 map->num_stripes = num_stripes;
4990 map->io_width = btrfs_chunk_io_width(leaf, chunk);
4991 map->io_align = btrfs_chunk_io_align(leaf, chunk);
4992 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4993 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4994 map->type = btrfs_chunk_type(leaf, chunk);
4995 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4996 for (i = 0; i < num_stripes; i++) {
4997 map->stripes[i].physical =
4998 btrfs_stripe_offset_nr(leaf, chunk, i);
4999 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
5000 read_extent_buffer(leaf, uuid, (unsigned long)
5001 btrfs_stripe_dev_uuid_nr(chunk, i),
5002 BTRFS_UUID_SIZE);
5003 map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
5004 uuid, NULL);
5005 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
5006 kfree(map);
5007 free_extent_map(em);
5008 return -EIO;
5009 }
5010 if (!map->stripes[i].dev) {
5011 map->stripes[i].dev =
5012 add_missing_dev(root, devid, uuid);
5013 if (!map->stripes[i].dev) {
5014 kfree(map);
5015 free_extent_map(em);
5016 return -EIO;
5017 }
5018 }
5019 map->stripes[i].dev->in_fs_metadata = 1;
5020 }
5021
5022 write_lock(&map_tree->map_tree.lock);
5023 ret = add_extent_mapping(&map_tree->map_tree, em);
5024 write_unlock(&map_tree->map_tree.lock);
5025 BUG_ON(ret); /* Tree corruption */
5026 free_extent_map(em);
5027
5028 return 0;
5029 }
5030
5031 static void fill_device_from_item(struct extent_buffer *leaf,
5032 struct btrfs_dev_item *dev_item,
5033 struct btrfs_device *device)
5034 {
5035 unsigned long ptr;
5036
5037 device->devid = btrfs_device_id(leaf, dev_item);
5038 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
5039 device->total_bytes = device->disk_total_bytes;
5040 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
5041 device->type = btrfs_device_type(leaf, dev_item);
5042 device->io_align = btrfs_device_io_align(leaf, dev_item);
5043 device->io_width = btrfs_device_io_width(leaf, dev_item);
5044 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
5045 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
5046 device->is_tgtdev_for_dev_replace = 0;
5047
5048 ptr = (unsigned long)btrfs_device_uuid(dev_item);
5049 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
5050 }
5051
5052 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
5053 {
5054 struct btrfs_fs_devices *fs_devices;
5055 int ret;
5056
5057 BUG_ON(!mutex_is_locked(&uuid_mutex));
5058
5059 fs_devices = root->fs_info->fs_devices->seed;
5060 while (fs_devices) {
5061 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5062 ret = 0;
5063 goto out;
5064 }
5065 fs_devices = fs_devices->seed;
5066 }
5067
5068 fs_devices = find_fsid(fsid);
5069 if (!fs_devices) {
5070 ret = -ENOENT;
5071 goto out;
5072 }
5073
5074 fs_devices = clone_fs_devices(fs_devices);
5075 if (IS_ERR(fs_devices)) {
5076 ret = PTR_ERR(fs_devices);
5077 goto out;
5078 }
5079
5080 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
5081 root->fs_info->bdev_holder);
5082 if (ret) {
5083 free_fs_devices(fs_devices);
5084 goto out;
5085 }
5086
5087 if (!fs_devices->seeding) {
5088 __btrfs_close_devices(fs_devices);
5089 free_fs_devices(fs_devices);
5090 ret = -EINVAL;
5091 goto out;
5092 }
5093
5094 fs_devices->seed = root->fs_info->fs_devices->seed;
5095 root->fs_info->fs_devices->seed = fs_devices;
5096 out:
5097 return ret;
5098 }
5099
5100 static int read_one_dev(struct btrfs_root *root,
5101 struct extent_buffer *leaf,
5102 struct btrfs_dev_item *dev_item)
5103 {
5104 struct btrfs_device *device;
5105 u64 devid;
5106 int ret;
5107 u8 fs_uuid[BTRFS_UUID_SIZE];
5108 u8 dev_uuid[BTRFS_UUID_SIZE];
5109
5110 devid = btrfs_device_id(leaf, dev_item);
5111 read_extent_buffer(leaf, dev_uuid,
5112 (unsigned long)btrfs_device_uuid(dev_item),
5113 BTRFS_UUID_SIZE);
5114 read_extent_buffer(leaf, fs_uuid,
5115 (unsigned long)btrfs_device_fsid(dev_item),
5116 BTRFS_UUID_SIZE);
5117
5118 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
5119 ret = open_seed_devices(root, fs_uuid);
5120 if (ret && !btrfs_test_opt(root, DEGRADED))
5121 return ret;
5122 }
5123
5124 device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
5125 if (!device || !device->bdev) {
5126 if (!btrfs_test_opt(root, DEGRADED))
5127 return -EIO;
5128
5129 if (!device) {
5130 printk(KERN_WARNING "warning devid %llu missing\n",
5131 (unsigned long long)devid);
5132 device = add_missing_dev(root, devid, dev_uuid);
5133 if (!device)
5134 return -ENOMEM;
5135 } else if (!device->missing) {
5136 /*
5137 * This happens when a device that was properly set up
5138 * in the device info lists suddenly goes bad.
5139 * device->bdev is NULL, so we have to set
5140 * device->missing to one here.
5141 */
5142 root->fs_info->fs_devices->missing_devices++;
5143 device->missing = 1;
5144 }
5145 }
5146
5147 if (device->fs_devices != root->fs_info->fs_devices) {
5148 BUG_ON(device->writeable);
5149 if (device->generation !=
5150 btrfs_device_generation(leaf, dev_item))
5151 return -EINVAL;
5152 }
5153
5154 fill_device_from_item(leaf, dev_item, device);
5155 device->dev_root = root->fs_info->dev_root;
5156 device->in_fs_metadata = 1;
5157 if (device->writeable && !device->is_tgtdev_for_dev_replace) {
5158 device->fs_devices->total_rw_bytes += device->total_bytes;
5159 spin_lock(&root->fs_info->free_chunk_lock);
5160 root->fs_info->free_chunk_space += device->total_bytes -
5161 device->bytes_used;
5162 spin_unlock(&root->fs_info->free_chunk_lock);
5163 }
5164 ret = 0;
5165 return ret;
5166 }
5167
5168 int btrfs_read_sys_array(struct btrfs_root *root)
5169 {
5170 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
5171 struct extent_buffer *sb;
5172 struct btrfs_disk_key *disk_key;
5173 struct btrfs_chunk *chunk;
5174 u8 *ptr;
5175 unsigned long sb_ptr;
5176 int ret = 0;
5177 u32 num_stripes;
5178 u32 array_size;
5179 u32 len = 0;
5180 u32 cur;
5181 struct btrfs_key key;
5182
5183 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
5184 BTRFS_SUPER_INFO_SIZE);
5185 if (!sb)
5186 return -ENOMEM;
5187 btrfs_set_buffer_uptodate(sb);
5188 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
5189 /*
5190 * The sb extent buffer is artificial and just used to read the system array.
5191 * The btrfs_set_buffer_uptodate() call does not properly mark all its
5192 * pages up-to-date when the page is larger: the extent does not cover the
5193 * whole page and consequently check_page_uptodate does not find all
5194 * the page's extents up-to-date (the hole beyond sb), and
5195 * write_extent_buffer then triggers a WARN_ON.
5196 *
5197 * Regular short extents go through the mark_extent_buffer_dirty/writeback
5198 * cycle, but sb spans only this function. Add an explicit SetPageUptodate
5199 * call to silence the warning e.g. on PowerPC 64.
5200 */
5201 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
5202 SetPageUptodate(sb->pages[0]);
5203
5204 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
5205 array_size = btrfs_super_sys_array_size(super_copy);
5206
5207 ptr = super_copy->sys_chunk_array;
5208 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
5209 cur = 0;
5210
5211 while (cur < array_size) {
5212 disk_key = (struct btrfs_disk_key *)ptr;
5213 btrfs_disk_key_to_cpu(&key, disk_key);
5214
5215 len = sizeof(*disk_key); ptr += len;
5216 sb_ptr += len;
5217 cur += len;
5218
5219 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
5220 chunk = (struct btrfs_chunk *)sb_ptr;
5221 ret = read_one_chunk(root, &key, sb, chunk);
5222 if (ret)
5223 break;
5224 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
5225 len = btrfs_chunk_item_size(num_stripes);
5226 } else {
5227 ret = -EIO;
5228 break;
5229 }
5230 ptr += len;
5231 sb_ptr += len;
5232 cur += len;
5233 }
5234 free_extent_buffer(sb);
5235 return ret;
5236 }
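/*
 * Editor's note: the on-disk layout parsed by the loop above, shown
 * schematically (added for clarity). sys_chunk_array is a packed
 * sequence of key/chunk pairs, where each chunk's size depends on its
 * stripe count:
 *
 *	[btrfs_disk_key][btrfs_chunk + N stripes][btrfs_disk_key][...]
 *	 <- sizeof key -><- btrfs_chunk_item_size(N) ->
 */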
5237
5238 int btrfs_read_chunk_tree(struct btrfs_root *root)
5239 {
5240 struct btrfs_path *path;
5241 struct extent_buffer *leaf;
5242 struct btrfs_key key;
5243 struct btrfs_key found_key;
5244 int ret;
5245 int slot;
5246
5247 root = root->fs_info->chunk_root;
5248
5249 path = btrfs_alloc_path();
5250 if (!path)
5251 return -ENOMEM;
5252
5253 mutex_lock(&uuid_mutex);
5254 lock_chunks(root);
5255
5256 /* first we search for all of the device items, and then we
5257 * read in all of the chunk items. This way we can create chunk
5258 * mappings that reference all of the devices that are found
5259 */
5260 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
5261 key.offset = 0;
5262 key.type = 0;
5263 again:
5264 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5265 if (ret < 0)
5266 goto error;
5267 while (1) {
5268 leaf = path->nodes[0];
5269 slot = path->slots[0];
5270 if (slot >= btrfs_header_nritems(leaf)) {
5271 ret = btrfs_next_leaf(root, path);
5272 if (ret == 0)
5273 continue;
5274 if (ret < 0)
5275 goto error;
5276 break;
5277 }
5278 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5279 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
5280 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
5281 break;
5282 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
5283 struct btrfs_dev_item *dev_item;
5284 dev_item = btrfs_item_ptr(leaf, slot,
5285 struct btrfs_dev_item);
5286 ret = read_one_dev(root, leaf, dev_item);
5287 if (ret)
5288 goto error;
5289 }
5290 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
5291 struct btrfs_chunk *chunk;
5292 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
5293 ret = read_one_chunk(root, &found_key, leaf, chunk);
5294 if (ret)
5295 goto error;
5296 }
5297 path->slots[0]++;
5298 }
5299 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
5300 key.objectid = 0;
5301 btrfs_release_path(path);
5302 goto again;
5303 }
5304 ret = 0;
5305 error:
5306 unlock_chunks(root);
5307 mutex_unlock(&uuid_mutex);
5308
5309 btrfs_free_path(path);
5310 return ret;
5311 }
5312
5313 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
5314 {
5315 int i;
5316
5317 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5318 btrfs_dev_stat_reset(dev, i);
5319 }
5320
5321 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
5322 {
5323 struct btrfs_key key;
5324 struct btrfs_key found_key;
5325 struct btrfs_root *dev_root = fs_info->dev_root;
5326 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
5327 struct extent_buffer *eb;
5328 int slot;
5329 int ret = 0;
5330 struct btrfs_device *device;
5331 struct btrfs_path *path = NULL;
5332 int i;
5333
5334 path = btrfs_alloc_path();
5335 if (!path) {
5336 ret = -ENOMEM;
5337 goto out;
5338 }
5339
5340 mutex_lock(&fs_devices->device_list_mutex);
5341 list_for_each_entry(device, &fs_devices->devices, dev_list) {
5342 int item_size;
5343 struct btrfs_dev_stats_item *ptr;
5344
5345 key.objectid = 0;
5346 key.type = BTRFS_DEV_STATS_KEY;
5347 key.offset = device->devid;
5348 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
5349 if (ret) {
5350 __btrfs_reset_dev_stats(device);
5351 device->dev_stats_valid = 1;
5352 btrfs_release_path(path);
5353 continue;
5354 }
5355 slot = path->slots[0];
5356 eb = path->nodes[0];
5357 btrfs_item_key_to_cpu(eb, &found_key, slot);
5358 item_size = btrfs_item_size_nr(eb, slot);
5359
5360 ptr = btrfs_item_ptr(eb, slot,
5361 struct btrfs_dev_stats_item);
5362
5363 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
5364 if (item_size >= (1 + i) * sizeof(__le64))
5365 btrfs_dev_stat_set(device, i,
5366 btrfs_dev_stats_value(eb, ptr, i));
5367 else
5368 btrfs_dev_stat_reset(device, i);
5369 }
5370
5371 device->dev_stats_valid = 1;
5372 btrfs_dev_stat_print_on_load(device);
5373 btrfs_release_path(path);
5374 }
5375 mutex_unlock(&fs_devices->device_list_mutex);
5376
5377 out:
5378 btrfs_free_path(path);
5379 return ret < 0 ? ret : 0;
5380 }
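/*
 * Editor's note: the item_size check in the loop above is a
 * forward-compatibility measure (added for clarity). If a newer kernel
 * ever appends counters to btrfs_dev_stats_item, an older on-disk item
 * simply has fewer __le64 slots, so each index is read only when
 * item_size >= (1 + i) * sizeof(__le64) and is reset to zero otherwise.
 */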
5381
5382 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
5383 struct btrfs_root *dev_root,
5384 struct btrfs_device *device)
5385 {
5386 struct btrfs_path *path;
5387 struct btrfs_key key;
5388 struct extent_buffer *eb;
5389 struct btrfs_dev_stats_item *ptr;
5390 int ret;
5391 int i;
5392
5393 key.objectid = 0;
5394 key.type = BTRFS_DEV_STATS_KEY;
5395 key.offset = device->devid;
5396
5397 path = btrfs_alloc_path();
5398 BUG_ON(!path);
5399 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
5400 if (ret < 0) {
5401 printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
5402 ret, rcu_str_deref(device->name));
5403 goto out;
5404 }
5405
5406 if (ret == 0 &&
5407 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
5408 /* need to delete old one and insert a new one */
5409 ret = btrfs_del_item(trans, dev_root, path);
5410 if (ret != 0) {
5411 printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
5412 rcu_str_deref(device->name), ret);
5413 goto out;
5414 }
5415 ret = 1;
5416 }
5417
5418 if (ret == 1) {
5419 /* need to insert a new item */
5420 btrfs_release_path(path);
5421 ret = btrfs_insert_empty_item(trans, dev_root, path,
5422 &key, sizeof(*ptr));
5423 if (ret < 0) {
5424 printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
5425 rcu_str_deref(device->name), ret);
5426 goto out;
5427 }
5428 }
5429
5430 eb = path->nodes[0];
5431 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
5432 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5433 btrfs_set_dev_stats_value(eb, ptr, i,
5434 btrfs_dev_stat_read(device, i));
5435 btrfs_mark_buffer_dirty(eb);
5436
5437 out:
5438 btrfs_free_path(path);
5439 return ret;
5440 }
5441
5442 /*
5443 * called from commit_transaction. Writes all changed device stats to disk.
5444 */
5445 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
5446 struct btrfs_fs_info *fs_info)
5447 {
5448 struct btrfs_root *dev_root = fs_info->dev_root;
5449 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
5450 struct btrfs_device *device;
5451 int ret = 0;
5452
5453 mutex_lock(&fs_devices->device_list_mutex);
5454 list_for_each_entry(device, &fs_devices->devices, dev_list) {
5455 if (!device->dev_stats_valid || !device->dev_stats_dirty)
5456 continue;
5457
5458 ret = update_dev_stat_item(trans, dev_root, device);
5459 if (!ret)
5460 device->dev_stats_dirty = 0;
5461 }
5462 mutex_unlock(&fs_devices->device_list_mutex);
5463
5464 return ret;
5465 }
5466
5467 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
5468 {
5469 btrfs_dev_stat_inc(dev, index);
5470 btrfs_dev_stat_print_on_error(dev);
5471 }
5472
5473 void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
5474 {
5475 if (!dev->dev_stats_valid)
5476 return;
5477 printk_ratelimited_in_rcu(KERN_ERR
5478 "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
5479 rcu_str_deref(dev->name),
5480 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
5481 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
5482 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
5483 btrfs_dev_stat_read(dev,
5484 BTRFS_DEV_STAT_CORRUPTION_ERRS),
5485 btrfs_dev_stat_read(dev,
5486 BTRFS_DEV_STAT_GENERATION_ERRS));
5487 }
5488
5489 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
5490 {
5491 int i;
5492
5493 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5494 if (btrfs_dev_stat_read(dev, i) != 0)
5495 break;
5496 if (i == BTRFS_DEV_STAT_VALUES_MAX)
5497 return; /* all values == 0, suppress message */
5498
5499 printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
5500 rcu_str_deref(dev->name),
5501 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
5502 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
5503 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
5504 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
5505 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
5506 }
5507
5508 int btrfs_get_dev_stats(struct btrfs_root *root,
5509 struct btrfs_ioctl_get_dev_stats *stats)
5510 {
5511 struct btrfs_device *dev;
5512 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
5513 int i;
5514
5515 mutex_lock(&fs_devices->device_list_mutex);
5516 dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
5517 mutex_unlock(&fs_devices->device_list_mutex);
5518
5519 if (!dev) {
5520 printk(KERN_WARNING
5521 "btrfs: get dev_stats failed, device not found\n");
5522 return -ENODEV;
5523 } else if (!dev->dev_stats_valid) {
5524 printk(KERN_WARNING
5525 "btrfs: get dev_stats failed, not yet valid\n");
5526 return -ENODEV;
5527 } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
5528 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
5529 if (stats->nr_items > i)
5530 stats->values[i] =
5531 btrfs_dev_stat_read_and_reset(dev, i);
5532 else
5533 btrfs_dev_stat_reset(dev, i);
5534 }
5535 } else {
5536 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5537 if (stats->nr_items > i)
5538 stats->values[i] = btrfs_dev_stat_read(dev, i);
5539 }
5540 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
5541 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
5542 return 0;
5543 }
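/*
 * Editor's note: a hedged sketch of how userspace reaches this function
 * (illustrative only; assumes the BTRFS_IOC_GET_DEV_STATS ioctl of this
 * era and a file descriptor fd on the mounted filesystem):
 *
 *	struct btrfs_ioctl_get_dev_stats stats = {
 *		.devid = 1,
 *		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
 *	};
 *
 *	if (ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &stats) == 0)
 *		printf("write errs: %llu\n", (unsigned long long)
 *		       stats.values[BTRFS_DEV_STAT_WRITE_ERRS]);
 */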
5544
5545 int btrfs_scratch_superblock(struct btrfs_device *device)
5546 {
5547 struct buffer_head *bh;
5548 struct btrfs_super_block *disk_super;
5549
5550 bh = btrfs_read_dev_super(device->bdev);
5551 if (!bh)
5552 return -EINVAL;
5553 disk_super = (struct btrfs_super_block *)bh->b_data;
5554
5555 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
5556 set_buffer_dirty(bh);
5557 sync_dirty_buffer(bh);
5558 brelse(bh);
5559
5560 return 0;
5561 }