Btrfs: Simplify device selection for mirrored reads
fs/btrfs/volumes.c (deliverable/linux.git)
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"

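/*
 * in-memory mirror of a chunk item: one btrfs_bio_stripe per copy,
 * hung off an extent_map via em->bdev so lookups can go through the
 * extent map tree
 */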
struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	int stripe_len;
	int sector_size;
	int num_stripes;
	int sub_stripes;
	struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

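/*
 * walk the global list of scanned filesystems, closing any block
 * devices that are still open and freeing the btrfs_device structs
 */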
int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;
	struct list_head *uuid_cur;
	struct list_head *devices_cur;
	struct btrfs_device *dev;

	list_for_each(uuid_cur, &fs_uuids) {
		fs_devices = list_entry(uuid_cur, struct btrfs_fs_devices,
					list);
		while (!list_empty(&fs_devices->devices)) {
			devices_cur = fs_devices->devices.next;
			dev = list_entry(devices_cur, struct btrfs_device,
					 dev_list);
			if (dev->bdev)
				close_bdev_excl(dev->bdev);
			list_del(&dev->dev_list);
			kfree(dev);
		}
	}
	return 0;
}

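/* find a device on the list by devid and uuid */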
static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
					  u8 *uuid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE)) {
			return dev;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

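/*
 * called for every device found by a scan.  Creates the per-fsid
 * btrfs_fs_devices struct the first time an fsid is seen, adds the
 * device to it if it is new, and tracks the highest generation seen
 * so we know which device holds the most recent super block
 */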
static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kmalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		fs_devices->lowest_devid = (u64)-1;
		fs_devices->num_devices = 0;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		list_add(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	if (fs_devices->lowest_devid > devid)
		fs_devices->lowest_devid = devid;
	*fs_devices_ret = fs_devices;
	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev)
			close_bdev_excl(device->bdev);
		device->bdev = NULL;
	}
	mutex_unlock(&uuid_mutex);
	return 0;
}

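/*
 * open every device in the FS, remembering which bdev holds the newest
 * super block (latest_bdev) and which belongs to the lowest devid.
 * On any failure, every device opened so far is closed again
 */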
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       int flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;
	int ret;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		bdev = open_bdev_excl(device->name, flags, holder);

		if (IS_ERR(bdev)) {
			printk("open %s failed\n", device->name);
			ret = PTR_ERR(bdev);
			goto fail;
		}
		if (device->devid == fs_devices->latest_devid)
			fs_devices->latest_bdev = bdev;
		if (device->devid == fs_devices->lowest_devid)
			fs_devices->lowest_bdev = bdev;
		device->bdev = bdev;
	}
	mutex_unlock(&uuid_mutex);
	return 0;
fail:
	mutex_unlock(&uuid_mutex);
	btrfs_close_devices(fs_devices);
	return ret;
}

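/*
 * read the super block off a single device, verify the magic, and
 * register the device on the per-fsid list via device_list_add
 */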
int btrfs_scan_one_device(const char *path, int flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	bdev = open_bdev_excl(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
	if (!bh) {
		ret = -EIO;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
		    sizeof(disk_super->magic))) {
		ret = -EINVAL;
		goto error_brelse;
	}
	devid = le64_to_cpu(disk_super->dev_item.devid);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk("device label %s ", disk_super->label);
	else {
		/* FIXME, make a real uuid parser */
		printk("device fsid %llx-%llx ",
		       *(unsigned long long *)disk_super->fsid,
		       *(unsigned long long *)(disk_super->fsid + 8));
	}
	printk("devid %Lu transid %Lu %s\n", devid, transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

error_brelse:
	brelse(bh);
error_close:
	close_bdev_excl(bdev);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	start_found = 0;
	path->reada = 2;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);
	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(root, path);
	return ret;
}

int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset,
			   u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);
	if (ret)
		goto err;

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}

static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(root, path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

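/*
 * system chunks are duplicated in the sys_chunk_array embedded in the
 * super block so the chunk tree itself can be located at mount time.
 * Append one key + chunk item pair to that array
 */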
int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key) >
	    BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

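/* return num scaled by factor tenths, i.e. num * factor / 10 */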
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

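/*
 * how many logical bytes a chunk of this type provides for a given
 * stripe size: mirrored chunks (raid1/dup) expose a single copy,
 * raid10 exposes num_stripes / sub_stripes worth, and raid0 or single
 * chunks expose every stripe
 */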
static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
			       int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}

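/*
 * allocate a new chunk: pick a stripe count for the raid level, find
 * devices with enough free space, allocate a dev extent on each one,
 * then insert the chunk item and mirror it into the in-memory mapping
 * tree.  *start and *num_bytes return the logical range of the chunk
 */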
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list = &extent_root->fs_info->fs_devices->devices;
	struct list_head *cur;
	struct extent_map_tree *em_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 physical;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 max_chunk_size = calc_size;
	u64 min_free;
	u64 avail;
	u64 max_avail = 0;
	u64 percent_max;
	int num_stripes = 1;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;
	struct btrfs_key key;

	if (list_empty(dev_list))
		return -ENOSPC;

	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = btrfs_super_num_devices(&info->super_copy);
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		num_stripes = min_t(u64, 2,
				    btrfs_super_num_devices(&info->super_copy));
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = btrfs_super_num_devices(&info->super_copy);
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_chunk_size = 10 * calc_size;
		min_stripe_size = 64 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_chunk_size = 4 * calc_size;
		min_stripe_size = 32 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		calc_size = 8 * 1024 * 1024;
		max_chunk_size = calc_size * 2;
		min_stripe_size = 1 * 1024 * 1024;
	}

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

again:
	if (calc_size * num_stripes > max_chunk_size) {
		calc_size = max_chunk_size;
		do_div(calc_size, num_stripes);
		do_div(calc_size, stripe_len);
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, min_stripe_size, calc_size);

	do_div(calc_size, stripe_len);
	calc_size *= stripe_len;

	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/* we add 1MB because we never use the first 1MB of the device */
	min_free += 1024 * 1024;

	/* build a private list of devices we will allocate from */
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_list);

		avail = device->total_bytes - device->bytes_used;
		cur = cur->next;
		if (avail >= min_free) {
			list_move_tail(&device->dev_list, &private_devs);
			index++;
			if (type & BTRFS_BLOCK_GROUP_DUP)
				index++;
		} else if (avail > max_avail)
			max_avail = avail;
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		return -ENOSPC;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &key.offset);
	if (ret)
		return ret;

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		return -ENOMEM;
	}

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);

	index = 0;
	printk("new chunk type %Lu start %Lu size %Lu\n",
	       type, key.offset, *num_bytes);
	while (index < num_stripes) {
		struct btrfs_stripe *stripe;
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset);
		BUG_ON(ret);
		printk("alloc chunk start %Lu size %Lu from dev %Lu type %Lu\n",
		       key.offset, calc_size, device->devid, type);
		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		physical = dev_offset;
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		kfree(map);
		kfree(chunk);
		return -ENOMEM;
	}
	em->bdev = (struct block_device *)map;
	em->start = key.offset;
	em->len = *num_bytes;
	em->block_start = 0;

	kfree(chunk);

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);
	return ret;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		spin_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		spin_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

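/*
 * how many copies of the data at this logical offset exist: raid1 and
 * dup chunks store num_stripes copies, raid10 stores sub_stripes
 * copies, everything else stores one
 */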
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	spin_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}

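/*
 * map a logical byte range to the physical stripes that hold it.
 * Writes get a stripe for every copy, reads get a single stripe
 * (mirror_num can pin the read to a specific copy).  When unplug_page
 * is set we only kick the backing devices instead of filling a multi
 */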
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_multi_bio **multi_ret,
			     int mirror_num, struct page *unplug_page)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	int num_stripes;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && !(rw & (1 << BIO_RW))) {
		stripes_allocated = 1;
	}
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;
	}

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	spin_unlock(&em_tree->lock);

	if (!em && unplug_page)
		return 0;

	if (!em) {
		printk("unable to find logical %Lu\n", logical);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	/* if our multi bio struct is too small, back off and try again */
	if (rw & (1 << BIO_RW)) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
		}
	}
	if (multi_ret && rw == WRITE &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		free_extent_map(em);
		kfree(multi);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}

	if (!multi_ret && !unplug_page)
		goto out;

	num_stripes = 1;
	stripe_index = 0;
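	/*
	 * pick the stripes this I/O touches: writes (and unplugs) fan out
	 * to every copy, reads use the requested mirror if one was given.
	 * With num_stripes still 1 here, the raid1 read fallback below
	 * always resolves to the first copy
	 */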
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->num_stripes;
		else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			u64 orig_stripe_nr = stripe_nr;
			stripe_index = do_div(orig_stripe_nr, num_stripes);
		}
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (1 << BIO_RW))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			u64 orig_stripe_nr = stripe_nr;
			stripe_index += do_div(orig_stripe_nr,
					       map->sub_stripes);
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
	}
	BUG_ON(stripe_index >= map->num_stripes);

	for (i = 0; i < num_stripes; i++) {
		if (unplug_page) {
			struct btrfs_device *device;
			struct backing_dev_info *bdi;

			device = map->stripes[stripe_index].dev;
			bdi = blk_get_backing_dev_info(device->bdev);
			if (bdi->unplug_io_fn) {
				bdi->unplug_io_fn(bdi, unplug_page);
			}
		} else {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			multi->stripes[i].dev = map->stripes[stripe_index].dev;
		}
		stripe_index++;
	}
	if (multi_ret) {
		*multi_ret = multi;
		multi->num_stripes = num_stripes;
	}
out:
	free_extent_map(em);
	return 0;
}

int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num, NULL);
}

int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
		      u64 logical, struct page *page)
{
	u64 length = PAGE_CACHE_SIZE;
	return __btrfs_map_block(map_tree, READ, logical, &length,
				 NULL, 0, page);
}

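/*
 * completion handler shared by all stripes of a multi-device bio.  The
 * original bio is only ended once every clone has finished; the ifdefs
 * cover the bio_endio() signature change in 2.6.24
 */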
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_multi_stripe(struct bio *bio, int err)
#else
static int end_bio_multi_stripe(struct bio *bio,
				unsigned int bytes_done, int err)
#endif
{
	struct btrfs_multi_bio *multi = bio->bi_private;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif
	if (err)
		multi->error = err;

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;

		if (!err && multi->error)
			err = multi->error;
		kfree(multi);

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
		bio_endio(bio, bio->bi_size, err);
#else
		bio_endio(bio, err);
#endif
	} else {
		bio_put(bio);
	}
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

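/*
 * map a bio to its physical stripes and submit it.  For writes to
 * mirrored chunks the bio is cloned once per extra copy, and all the
 * clones share one btrfs_multi_bio completion
 */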
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;

	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk("mapping failed logical %Lu bio len %Lu "
		       "len %Lu\n", logical, length, map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while (dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;
		bio->bi_bdev = dev->bdev;
		spin_lock(&dev->io_lock);
		dev->total_ios++;
		spin_unlock(&dev->io_lock);
		submit_bio(rw, bio);
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}

struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid)
{
	struct list_head *head = &root->fs_info->fs_devices->devices;

	return __find_device(head, devid, uuid);
}

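/*
 * build an in-memory map_lookup from an on-disk chunk item and insert
 * it into the mapping tree, resolving every stripe's devid/uuid to a
 * scanned btrfs_device
 */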
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	spin_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid);
		if (!map->stripes[i].dev) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
	}

	spin_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	spin_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}

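/* copy the on-disk dev item fields into the in-memory btrfs_device */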
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}

static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	device = btrfs_find_device(root, devid, dev_uuid);
	if (!device) {
		printk("warning: devid %Lu not found\n", devid);
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			return -ENOMEM;
		list_add(&device->dev_list,
			 &root->fs_info->fs_devices->devices);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	ret = 0;
#if 0
	ret = btrfs_open_device(device);
	if (ret) {
		kfree(device);
	}
#endif
	return ret;
}

int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}

int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb = root->fs_info->sb_buffer;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u8 *ptr;
	unsigned long sb_ptr;
	u32 cur;
	int ret;

	array_size = btrfs_super_sys_array_size(super_copy);

	/*
	 * the sys_chunk_array holds (disk key, chunk item) pairs back to
	 * back.  Walk it once, mapping each system chunk as we go
	 */
	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			BUG_ON(ret);
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			BUG();
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	return 0;
}

int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				BUG_ON(ret);
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}

	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}