/*
 * Copyright (C) 2008 Red Hat. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"

#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info);

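/*
 * Look up the inode that backs the free space cache keyed at 'offset':
 * read the btrfs_free_space_header item, pull the inode key out of it
 * and iget the inode itself.
 */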
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (!inode)
		return ERR_PTR(-ENOENT);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	return inode;
}

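/*
 * Cached wrapper around __lookup_free_space_inode(): reuse the inode
 * reference stashed in the block group when there is one, and convert
 * old style cache inodes (missing NODATASUM/NODATACOW) on first use,
 * invalidating the on-disk cache in that case.
 */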
struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		printk(KERN_INFO "Old style space inode found, converting.\n");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}

int __create_free_space_inode(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path, u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	/* We inline crcs for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}
	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}

int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(root, trans, path, ino,
					 block_group->key.objectid);
}

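/*
 * Throw away the current contents of the cache inode. The truncate is
 * backed by the global block reserve, so bail out with -ENOSPC up front
 * if the reserve cannot cover the truncation plus the inode update.
 */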
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
				    struct inode *inode)
{
	struct btrfs_block_rsv *rsv;
	u64 needed_bytes;
	loff_t oldsize;
	int ret = 0;

	rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->global_block_rsv;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
		btrfs_calc_trans_metadata_size(root, 1);

	spin_lock(&trans->block_rsv->lock);
	if (trans->block_rsv->reserved < needed_bytes) {
		spin_unlock(&trans->block_rsv->lock);
		trans->block_rsv = rsv;
		return -ENOSPC;
	}
	spin_unlock(&trans->block_rsv->lock);

	oldsize = i_size_read(inode);
	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, oldsize, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);

	if (ret) {
		trans->block_rsv = rsv;
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	ret = btrfs_update_inode(trans, root, inode);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
	trans->block_rsv = rsv;

	return ret;
}

static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);

	return 0;
}

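/*
 * State for streaming the free space cache to or from disk: the locked
 * pages of the cache inode, the currently kmap()ed page ('cur' walks
 * through it, 'size' is what is left of it), and whether per-page crcs
 * are stored at the front of the first page. check_crcs is set for
 * every cache except the free-ino cache.
 */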
struct io_ctl {
	void *cur, *orig;
	struct page *page;
	struct page **pages;
	struct btrfs_root *root;
	unsigned long size;
	int index;
	int num_pages;
	unsigned check_crcs:1;
};

static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
		       struct btrfs_root *root)
{
	memset(io_ctl, 0, sizeof(struct io_ctl));
	io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
		PAGE_CACHE_SHIFT;
	io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
				GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;
	io_ctl->root = root;
	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
		io_ctl->check_crcs = 1;
	return 0;
}

static void io_ctl_free(struct io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
}

static void io_ctl_unmap_page(struct io_ctl *io_ctl)
{
	if (io_ctl->cur) {
		kunmap(io_ctl->page);
		io_ctl->cur = NULL;
		io_ctl->orig = NULL;
	}
}

static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
{
	WARN_ON(io_ctl->cur);
	BUG_ON(io_ctl->index >= io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = kmap(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_CACHE_SIZE;
	if (clear)
		memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
}

static void io_ctl_drop_pages(struct io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			ClearPageChecked(io_ctl->pages[i]);
			unlock_page(io_ctl->pages[i]);
			page_cache_release(io_ctl->pages[i]);
		}
	}
}

static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				printk(KERN_ERR "btrfs: error reading free "
				       "space cache\n");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++) {
		clear_page_dirty_for_io(io_ctl->pages[i]);
		set_page_extent_mapped(io_ctl->pages[i]);
	}

	return 0;
}

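/*
 * When check_crcs is set, the first page of the cache file begins with
 * an array of u32 crcs (one per page) followed by the u64 transaction
 * generation; without crcs, only the generation is stored. The two
 * helpers below step 'cur' past that header before writing or
 * verifying the generation.
 */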
static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
{
	__le64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas. If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area. If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		printk_ratelimited(KERN_ERR "btrfs: space cache generation "
				   "(%Lu) does not match inode (%Lu)\n",
				   le64_to_cpu(*gen), generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}

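/*
 * Checksum the page we just finished and store the crc in slot 'index'
 * of the crc array on the first page. The first page's own crc is
 * computed over everything after the crc array itself.
 */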
static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = kmap(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
	kunmap(io_ctl->pages[0]);
}

static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = kmap(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;
	kunmap(io_ctl->pages[0]);

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	if (val != crc) {
		printk_ratelimited(KERN_ERR "btrfs: csum mismatch on free "
				   "space cache\n");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}

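/*
 * Append one on-disk btrfs_free_space_entry (offset, bytes, type) at
 * the current position, crc'ing the page and mapping the next one
 * whenever there is no room left for another entry.
 */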
static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}

static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}

static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}

static int io_ctl_read_entry(struct io_ctl *io_ctl,
			     struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}

static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
	io_ctl_unmap_page(io_ctl);

	return 0;
}

/*
 * Since we attach pinned extents after the fact we can have contiguous
 * sections of free space that are split up in entries. This poses a problem
 * with the tree logging stuff, since it could have allocated across what
 * appears to be two entries, because we would have merged those entries when
 * adding the pinned extents back to the free space cache. So run through the
 * space cache that we just loaded and merge contiguous entries. This will
 * make the log replay stuff not blow up and it will make for nicer allocator
 * behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;

again:
	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!prev)
			goto next;
		if (e->bitmap || prev->bitmap)
			goto next;
		if (prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);
			prev = NULL;
			spin_unlock(&ctl->tree_lock);
			goto again;
		}
next:
		prev = e;
	}
	spin_unlock(&ctl->tree_lock);
}

int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	struct list_head bitmaps;
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	INIT_LIST_HEAD(&bitmaps);

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (BTRFS_I(inode)->generation != generation) {
		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
		       " not match free space cache generation (%llu)\n",
		       (unsigned long long)BTRFS_I(inode)->generation,
		       (unsigned long long)generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, root);
	if (ret)
		return ret;

	ret = readahead_cache(inode);
	if (ret)
		goto out;

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				printk(KERN_ERR "Duplicate entries in "
				       "free space cache, dumping\n");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			BUG_ON(!num_bitmaps);
			num_bitmaps--;
			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				printk(KERN_ERR "Duplicate entries in "
				       "free space cache, dumping\n");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * The bitmap data is stored after all the entries in the cache file,
	 * so only now that every entry has been added can we read the bitmaps
	 * back in.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}

int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		printk(KERN_ERR "block group %llu has the wrong amount of free "
		       "space\n", block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		printk(KERN_ERR "btrfs: failed to load free space cache "
		       "for block group %llu\n", block_group->key.objectid);
	}

	iput(inode);
	return ret;
}

/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root: the root the inode belongs to
 * @ctl: the free space cache we are going to write out
 * @block_group: the block_group for this cache if it belongs to a block_group
 * @trans: the trans handle
 * @path: the path to use
 * @offset: the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick
 * recovery on mount. This will return 0 if it was successful in writing the
 * cache out, and -1 if it was not.
 */
int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct rb_node *node;
	struct list_head *pos, *n;
	struct extent_state *cached_state = NULL;
	struct btrfs_free_cluster *cluster = NULL;
	struct extent_io_tree *unpin = NULL;
	struct io_ctl io_ctl;
	struct list_head bitmap_list;
	struct btrfs_key key;
	u64 start, extent_start, extent_end, len;
	int entries = 0;
	int bitmaps = 0;
	int ret;
	int err = -1;

	INIT_LIST_HEAD(&bitmap_list);

	if (!i_size_read(inode))
		return -1;

	ret = io_ctl_init(&io_ctl, inode, root);
	if (ret)
		return -1;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list))
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);

	/* Lock all pages first so we can lock the extent safely. */
	io_ctl_prepare_pages(&io_ctl, inode, 0);

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 0, &cached_state);

	node = rb_first(&ctl->free_space_offset);
	if (!node && cluster) {
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Make sure we can fit our crcs into the first page */
	if (io_ctl.check_crcs &&
	    (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) {
		WARN_ON(1);
		goto out_nospc;
	}

	io_ctl_set_generation(&io_ctl, trans->transid);

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		entries++;

		ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto out_nospc;

		if (e->bitmap) {
			list_add_tail(&e->list, &bitmap_list);
			bitmaps++;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster = NULL;
		}
	}

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 */

	/*
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = root->fs_info->pinned_extents;

	if (block_group)
		start = block_group->key.objectid;

	while (block_group && (start < block_group->key.objectid +
			       block_group->key.offset)) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY);
		if (ret) {
			ret = 0;
			break;
		}

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->key.objectid +
				 block_group->key.offset, extent_end + 1);
		len = extent_end - extent_start;

		entries++;
		ret = io_ctl_add_entry(&io_ctl, extent_start, len, NULL);
		if (ret)
			goto out_nospc;

		start = extent_end;
	}

	/* Write out the bitmaps */
	list_for_each_safe(pos, n, &bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);

		ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap);
		if (ret)
			goto out_nospc;
		list_del_init(&entry->list);
	}

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(&io_ctl);

	ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
				0, i_size_read(inode), &cached_state);
	io_ctl_drop_pages(&io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	if (ret)
		goto out;

	btrfs_wait_ordered_range(inode, 0, (u64)-1);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);
		goto out;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		BUG_ON(!path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
					 NULL, GFP_NOFS);
			btrfs_release_path(path);
			goto out;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	err = 0;
out:
	io_ctl_free(&io_ctl);
	if (err) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	return err;

out_nospc:
	list_for_each_safe(pos, n, &bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);
		list_del_init(&entry->list);
	}
	io_ctl_drop_pages(&io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);
	goto out;
}

int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	root = root->fs_info->tree_root;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
				      path, block_group->key.objectid);
	if (ret) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);
		ret = 0;
#ifdef DEBUG
		printk(KERN_ERR "btrfs: failed to write free space cache "
		       "for block group %llu\n", block_group->key.objectid);
#endif
	}

	iput(inode);
	return ret;
}

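/*
 * Bitmap helpers: each bitmap entry tracks free space at 'unit'
 * (sectorsize) granularity, one bit per unit, starting at the entry's
 * offset.
 */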
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	BUG_ON(offset < bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}

static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset. If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap. So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically. If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we
 * just want a section that is at least 'bytes' in size and comes at or after
 * the given offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = &entry->offset_index;
			while (1) {
				n = rb_prev(n);
				if (!n)
					break;
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap) {
					if (prev->offset + prev->bytes > offset)
						entry = prev;
					break;
				}
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			BUG_ON(entry->offset > offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = &entry->offset_index;
		while (1) {
			n = rb_prev(n);
			if (!n)
				break;
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap) {
				if (prev->offset + prev->bytes > offset)
					return prev;
				break;
			}
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}

static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
		    struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	BUG_ON(!info->bitmap && !info->bytes);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}

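/*
 * Example, assuming 4K pages and a 4K sectorsize: BITS_PER_BITMAP is
 * 32768, so a single bitmap covers 32768 * 4K = 128M of the block group
 * while costing one page of memory, versus sizeof(struct
 * btrfs_free_space) per extent entry. recalculate_thresholds() below
 * balances the two against the 32k-per-1G memory goal.
 */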
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	BUG_ON(ctl->total_bitmaps > max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < 1024 * 1024 * 1024)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG *
			div64_u64(size, 1024 * 1024 * 1024);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

	ctl->extents_thresh =
		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}

static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}

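/*
 * Scan a bitmap entry for a run of set bits covering at least '*bytes'
 * starting at or after '*offset'. On success, *offset and *bytes are
 * updated to describe the whole run found; returns -1 if none exists.
 */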
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
{
	unsigned long found_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		if ((next_zero - i) >= bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	return -1;
}

static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		return NULL;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		return NULL;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes)
			continue;

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, offset, bytes);
			if (!ret)
				return entry;
			continue;
		}

		*offset = entry->offset;
		*bytes = entry->bytes;
		return entry;
	}

	return NULL;
}

static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	INIT_LIST_HEAD(&info->list);
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}

static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *bitmap_info,
				       u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * XXX - this can go away after a few releases.
	 *
	 * since the only user of btrfs_remove_free_space is the tree logging
	 * stuff, and the only way to test that is under crash conditions, we
	 * want to have this debug stuff here just in case something's not
	 * working. Search the bitmap for the space we are trying to use to
	 * make sure it's actually there. If it's not there then we need to
	 * stop because something has gone wrong.
	 */
	search_start = *offset;
	search_bytes = *bytes;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
	BUG_ON(ret < 0 || search_start != *offset);

	if (*offset > bitmap_info->offset && *offset + *bytes > end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
		*bytes -= end - *offset + 1;
		*offset = end + 1;
	} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
		*bytes = 0;
	}

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = *bytes;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}

static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes)
{
	u64 bytes_to_set = 0;
	u64 end;

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	return bytes_to_set;
}

static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them for larger extents, however if we have plenty
		 * of cache left then go ahead and add them, no sense in adding
		 * the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= block_group->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * some block groups are so tiny they can't be enveloped by a bitmap, so
	 * don't even bother to create a bitmap for this
	 */
	if (BITS_PER_BITMAP * block_group->sectorsize >
	    block_group->key.offset)
		return false;

	return true;
}

static struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group_cache *block_group = NULL;
	int added = 0;
	u64 bytes, offset, bytes_added;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;
again:
	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so and it has our bitmap we need to add
	 * the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry,
							  offset, bytes);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
		if (!bytes) {
			ret = 1;
			goto out;
		}
	}

no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		BUG_ON(added);
		goto new_bitmap;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	added = 0;

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}

static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
				 struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}

int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * There was no extent directly to the left or right of this new
	 * extent, so we know we're going to have to allocate a new extent;
	 * before we do that, see if we need to drop this into a bitmap.
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		printk(KERN_CRIT "btrfs: unable to add free space: %d\n", ret);
		BUG_ON(ret == -EEXIST);
	}

	return ret;
}

int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct btrfs_free_space *next_info = NULL;
	int ret = 0;

	spin_lock(&ctl->tree_lock);

again:
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			/*
			 * the tree logging code might be calling us before we
			 * have fully loaded the free space rbtree for this
			 * block group. So it is possible the entry won't
			 * be in the rbtree yet at all. The caching code
			 * will make sure not to put it in the rbtree if
			 * the logging code has pinned it.
			 */
			goto out_lock;
		}
	}

	if (info->bytes < bytes && rb_next(&info->offset_index)) {
		u64 end;
		next_info = rb_entry(rb_next(&info->offset_index),
				     struct btrfs_free_space,
				     offset_index);

		if (next_info->bitmap)
			end = next_info->offset +
				BITS_PER_BITMAP * ctl->unit - 1;
		else
			end = next_info->offset + next_info->bytes;

		if (next_info->bytes < bytes ||
		    next_info->offset > offset || offset > end) {
			printk(KERN_CRIT "Found free space at %llu, size %llu,"
			       " trying to use %llu\n",
			       (unsigned long long)info->offset,
			       (unsigned long long)info->bytes,
			       (unsigned long long)bytes);
			WARN_ON(1);
			ret = -EINVAL;
			goto out_lock;
		}

		info = next_info;
	}

	if (info->bytes == bytes) {
		unlink_free_space(ctl, info);
		if (info->bitmap) {
			kfree(info->bitmap);
			ctl->total_bitmaps--;
		}
		kmem_cache_free(btrfs_free_space_cachep, info);
		ret = 0;
		goto out_lock;
	}

	if (!info->bitmap && info->offset == offset) {
		unlink_free_space(ctl, info);
		info->offset += bytes;
		info->bytes -= bytes;
		ret = link_free_space(ctl, info);
		WARN_ON(ret);
		goto out_lock;
	}

	if (!info->bitmap && info->offset <= offset &&
	    info->offset + info->bytes >= offset + bytes) {
		u64 old_start = info->offset;
		/*
		 * we're freeing space in the middle of the info,
		 * this can happen during tree log replay
		 *
		 * first unlink the old info and then
		 * insert it again after the hole we're creating
		 */
		unlink_free_space(ctl, info);
		if (offset + bytes < info->offset + info->bytes) {
			u64 old_end = info->offset + info->bytes;

			info->offset = offset + bytes;
			info->bytes = old_end - info->offset;
			ret = link_free_space(ctl, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;
		} else {
			/*
			 * the hole we're creating ends at the end
			 * of the info struct, just free the info
			 */
			kmem_cache_free(btrfs_free_space_cachep, info);
		}
		spin_unlock(&ctl->tree_lock);

		/*
		 * step two, insert a new info struct to cover
		 * anything before the hole
		 */
		ret = btrfs_add_free_space(block_group, old_start,
					   offset - old_start);
		WARN_ON(ret); /* -ENOMEM */
		goto out;
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN)
		goto again;
	BUG_ON(ret); /* logic error */
out_lock:
	spin_unlock(&ctl->tree_lock);
out:
	return ret;
}

2002 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
2003 u64 bytes)
2004 {
2005 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2006 struct btrfs_free_space *info;
2007 struct rb_node *n;
2008 int count = 0;
2009
2010 for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
2011 info = rb_entry(n, struct btrfs_free_space, offset_index);
2012 if (info->bytes >= bytes)
2013 count++;
2014 printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
2015 (unsigned long long)info->offset,
2016 (unsigned long long)info->bytes,
2017 (info->bitmap) ? "yes" : "no");
2018 }
2019 printk(KERN_INFO "block group has cluster?: %s\n",
2020 list_empty(&block_group->cluster_list) ? "no" : "yes");
2021 printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
2022 "\n", count);
2023 }
2024
2025 void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
2026 {
2027 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2028
2029 spin_lock_init(&ctl->tree_lock);
2030 ctl->unit = block_group->sectorsize;
2031 ctl->start = block_group->key.objectid;
2032 ctl->private = block_group;
2033 ctl->op = &free_space_op;
2034
2035 /*
2036 * we only want to have 32k of ram per block group for keeping
2037 * track of free space, and if we pass 1/2 of that we want to
2038 * start converting things over to using bitmaps
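	 * (that works out to 16k of ram, i.e.
	 * 16384 / sizeof(struct btrfs_free_space) extent entries)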
2039 */
2040 ctl->extents_thresh = ((1024 * 32) / 2) /
2041 sizeof(struct btrfs_free_space);
2042 }
2043
2044 /*
2045 * for a given cluster, put all of its extents back into the free
2046 * space cache. If the block group passed doesn't match the block group
2047 * pointed to by the cluster, someone else raced in and freed the
2048 * cluster already. In that case, we just return without changing anything
2049 */
2050 static int
2051 __btrfs_return_cluster_to_free_space(
2052 struct btrfs_block_group_cache *block_group,
2053 struct btrfs_free_cluster *cluster)
2054 {
2055 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2056 struct btrfs_free_space *entry;
2057 struct rb_node *node;
2058
2059 spin_lock(&cluster->lock);
2060 if (cluster->block_group != block_group)
2061 goto out;
2062
2063 cluster->block_group = NULL;
2064 cluster->window_start = 0;
2065 list_del_init(&cluster->block_group_list);
2066
2067 node = rb_first(&cluster->root);
2068 while (node) {
2069 bool bitmap;
2070
2071 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2072 node = rb_next(&entry->offset_index);
2073 rb_erase(&entry->offset_index, &cluster->root);
2074
2075 bitmap = (entry->bitmap != NULL);
2076 if (!bitmap)
2077 try_merge_free_space(ctl, entry, false);
2078 tree_insert_offset(&ctl->free_space_offset,
2079 entry->offset, &entry->offset_index, bitmap);
2080 }
2081 cluster->root = RB_ROOT;
2082
2083 out:
2084 spin_unlock(&cluster->lock);
2085 btrfs_put_block_group(block_group);
2086 return 0;
2087 }
2088
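/*
 * Free every entry in the cache.  Expects ctl->tree_lock to be held;
 * note that the lock may be dropped and retaken to give the scheduler
 * a chance to run.
 */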
2089 void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
2090 {
2091 struct btrfs_free_space *info;
2092 struct rb_node *node;
2093
2094 while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
2095 info = rb_entry(node, struct btrfs_free_space, offset_index);
2096 if (!info->bitmap) {
2097 unlink_free_space(ctl, info);
2098 kmem_cache_free(btrfs_free_space_cachep, info);
2099 } else {
2100 free_bitmap(ctl, info);
2101 }
2102 if (need_resched()) {
2103 spin_unlock(&ctl->tree_lock);
2104 cond_resched();
2105 spin_lock(&ctl->tree_lock);
2106 }
2107 }
2108 }
2109
2110 void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
2111 {
2112 spin_lock(&ctl->tree_lock);
2113 __btrfs_remove_free_space_cache_locked(ctl);
2114 spin_unlock(&ctl->tree_lock);
2115 }
2116
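/*
 * Tear down a block group's free space cache: return any clusters
 * still attached to it and then free all of the entries.
 */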
2117 void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
2118 {
2119 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2120 struct btrfs_free_cluster *cluster;
2121 struct list_head *head;
2122
2123 spin_lock(&ctl->tree_lock);
2124 while ((head = block_group->cluster_list.next) !=
2125 &block_group->cluster_list) {
2126 cluster = list_entry(head, struct btrfs_free_cluster,
2127 block_group_list);
2128
2129 WARN_ON(cluster->block_group != block_group);
2130 __btrfs_return_cluster_to_free_space(block_group, cluster);
2131 if (need_resched()) {
2132 spin_unlock(&ctl->tree_lock);
2133 cond_resched();
2134 spin_lock(&ctl->tree_lock);
2135 }
2136 }
2137 __btrfs_remove_free_space_cache_locked(ctl);
2138 spin_unlock(&ctl->tree_lock);
}
2141
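/*
 * Find a free extent of at least @bytes + @empty_size and carve @bytes
 * out of it.  Returns the start offset of the allocation, or 0 if
 * nothing big enough was found.
 */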
2142 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
2143 u64 offset, u64 bytes, u64 empty_size)
2144 {
2145 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2146 struct btrfs_free_space *entry = NULL;
2147 u64 bytes_search = bytes + empty_size;
2148 u64 ret = 0;
2149
2150 spin_lock(&ctl->tree_lock);
2151 entry = find_free_space(ctl, &offset, &bytes_search);
2152 if (!entry)
2153 goto out;
2154
2155 ret = offset;
2156 if (entry->bitmap) {
2157 bitmap_clear_bits(ctl, entry, offset, bytes);
2158 if (!entry->bytes)
2159 free_bitmap(ctl, entry);
2160 } else {
2161 unlink_free_space(ctl, entry);
2162 entry->offset += bytes;
2163 entry->bytes -= bytes;
2164 if (!entry->bytes)
2165 kmem_cache_free(btrfs_free_space_cachep, entry);
2166 else
2167 link_free_space(ctl, entry);
2168 }
2169
2170 out:
2171 spin_unlock(&ctl->tree_lock);
2172
2173 return ret;
2174 }
2175
2176 /*
2177 * given a cluster, put all of its extents back into the free space
2178 * cache. If a block group is passed, this function will only free
2179 * a cluster that belongs to the passed block group.
2180 *
2181 * Otherwise, it'll get a reference on the block group pointed to by the
2182 * cluster and remove the cluster from it.
2183 */
2184 int btrfs_return_cluster_to_free_space(
2185 struct btrfs_block_group_cache *block_group,
2186 struct btrfs_free_cluster *cluster)
2187 {
2188 struct btrfs_free_space_ctl *ctl;
2189 int ret;
2190
2191 /* first, get a safe pointer to the block group */
2192 spin_lock(&cluster->lock);
2193 if (!block_group) {
2194 block_group = cluster->block_group;
2195 if (!block_group) {
2196 spin_unlock(&cluster->lock);
2197 return 0;
2198 }
2199 } else if (cluster->block_group != block_group) {
		/* someone else has already freed it, don't redo their work */
2201 spin_unlock(&cluster->lock);
2202 return 0;
2203 }
2204 atomic_inc(&block_group->count);
2205 spin_unlock(&cluster->lock);
2206
2207 ctl = block_group->free_space_ctl;
2208
2209 /* now return any extents the cluster had on it */
2210 spin_lock(&ctl->tree_lock);
2211 ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
2212 spin_unlock(&ctl->tree_lock);
2213
2214 /* finally drop our ref */
2215 btrfs_put_block_group(block_group);
2216 return ret;
2217 }
2218
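/*
 * Try to satisfy an allocation of @bytes from a bitmap entry that is
 * already part of the cluster, searching from @min_start.  Returns the
 * start offset of the allocation, or 0 if the bitmap has no large
 * enough run of free bits.
 */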
2219 static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
2220 struct btrfs_free_cluster *cluster,
2221 struct btrfs_free_space *entry,
2222 u64 bytes, u64 min_start)
2223 {
2224 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2225 int err;
	u64 search_start = min_start;
	u64 search_bytes = bytes;
	u64 ret = 0;
2232
2233 err = search_bitmap(ctl, entry, &search_start, &search_bytes);
2234 if (err)
2235 return 0;
2236
2237 ret = search_start;
2238 __bitmap_clear_bits(ctl, entry, ret, bytes);
2239
2240 return ret;
2241 }
2242
2243 /*
2244 * given a cluster, try to allocate 'bytes' from it, returns 0
2245 * if it couldn't find anything suitably large, or a logical disk offset
2246 * if things worked out
2247 */
2248 u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
2249 struct btrfs_free_cluster *cluster, u64 bytes,
2250 u64 min_start)
2251 {
2252 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2253 struct btrfs_free_space *entry = NULL;
2254 struct rb_node *node;
2255 u64 ret = 0;
2256
2257 spin_lock(&cluster->lock);
2258 if (bytes > cluster->max_size)
2259 goto out;
2260
2261 if (cluster->block_group != block_group)
2262 goto out;
2263
2264 node = rb_first(&cluster->root);
2265 if (!node)
2266 goto out;
2267
2268 entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
2270 if (entry->bytes < bytes ||
2271 (!entry->bitmap && entry->offset < min_start)) {
2272 node = rb_next(&entry->offset_index);
2273 if (!node)
2274 break;
2275 entry = rb_entry(node, struct btrfs_free_space,
2276 offset_index);
2277 continue;
2278 }
2279
2280 if (entry->bitmap) {
2281 ret = btrfs_alloc_from_bitmap(block_group,
2282 cluster, entry, bytes,
2283 cluster->window_start);
2284 if (ret == 0) {
2285 node = rb_next(&entry->offset_index);
2286 if (!node)
2287 break;
2288 entry = rb_entry(node, struct btrfs_free_space,
2289 offset_index);
2290 continue;
2291 }
2292 cluster->window_start += bytes;
2293 } else {
2294 ret = entry->offset;
2295
2296 entry->offset += bytes;
2297 entry->bytes -= bytes;
2298 }
2299
2300 if (entry->bytes == 0)
2301 rb_erase(&entry->offset_index, &cluster->root);
2302 break;
2303 }
2304 out:
2305 spin_unlock(&cluster->lock);
2306
2307 if (!ret)
2308 return 0;
2309
2310 spin_lock(&ctl->tree_lock);
2311
2312 ctl->free_space -= bytes;
2313 if (entry->bytes == 0) {
2314 ctl->free_extents--;
2315 if (entry->bitmap) {
2316 kfree(entry->bitmap);
2317 ctl->total_bitmaps--;
2318 ctl->op->recalc_thresholds(ctl);
2319 }
2320 kmem_cache_free(btrfs_free_space_cachep, entry);
2321 }
2322
2323 spin_unlock(&ctl->tree_lock);
2324
2325 return ret;
2326 }
2327
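/*
 * Scan a bitmap entry for enough free bits to satisfy the cluster: at
 * least @bytes in total, with a longest run of at least @cont1_bytes.
 * On success the entry is moved from the free space tree into the
 * cluster's rbtree.
 */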
2328 static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
2329 struct btrfs_free_space *entry,
2330 struct btrfs_free_cluster *cluster,
2331 u64 offset, u64 bytes,
2332 u64 cont1_bytes, u64 min_bytes)
2333 {
2334 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2335 unsigned long next_zero;
2336 unsigned long i;
2337 unsigned long want_bits;
2338 unsigned long min_bits;
2339 unsigned long found_bits;
2340 unsigned long start = 0;
2341 unsigned long total_found = 0;
2342 int ret;
2343
2344 i = offset_to_bit(entry->offset, block_group->sectorsize,
2345 max_t(u64, offset, entry->offset));
2346 want_bits = bytes_to_bits(bytes, block_group->sectorsize);
2347 min_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
2348
2349 again:
2350 found_bits = 0;
2351 for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
2352 i < BITS_PER_BITMAP;
2353 i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
2354 next_zero = find_next_zero_bit(entry->bitmap,
2355 BITS_PER_BITMAP, i);
2356 if (next_zero - i >= min_bits) {
2357 found_bits = next_zero - i;
2358 break;
2359 }
2360 i = next_zero;
2361 }
2362
2363 if (!found_bits)
2364 return -ENOSPC;
2365
2366 if (!total_found) {
2367 start = i;
2368 cluster->max_size = 0;
2369 }
2370
2371 total_found += found_bits;
2372
2373 if (cluster->max_size < found_bits * block_group->sectorsize)
2374 cluster->max_size = found_bits * block_group->sectorsize;
2375
2376 if (total_found < want_bits || cluster->max_size < cont1_bytes) {
2377 i = next_zero + 1;
2378 goto again;
2379 }
2380
2381 cluster->window_start = start * block_group->sectorsize +
2382 entry->offset;
2383 rb_erase(&entry->offset_index, &ctl->free_space_offset);
2384 ret = tree_insert_offset(&cluster->root, entry->offset,
2385 &entry->offset_index, 1);
2386 BUG_ON(ret); /* -EEXIST; Logic error */
2387
2388 trace_btrfs_setup_cluster(block_group, cluster,
2389 total_found * block_group->sectorsize, 1);
2390 return 0;
2391 }
2392
2393 /*
2394 * This searches the block group for just extents to fill the cluster with.
2395 * Try to find a cluster with at least bytes total bytes, at least one
 * extent of cont1_bytes, and other extents of at least min_bytes.
2397 */
2398 static noinline int
2399 setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2400 struct btrfs_free_cluster *cluster,
2401 struct list_head *bitmaps, u64 offset, u64 bytes,
2402 u64 cont1_bytes, u64 min_bytes)
2403 {
2404 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2405 struct btrfs_free_space *first = NULL;
2406 struct btrfs_free_space *entry = NULL;
2407 struct btrfs_free_space *last;
2408 struct rb_node *node;
2409 u64 window_start;
2410 u64 window_free;
2411 u64 max_extent;
2412 u64 total_size = 0;
2413
2414 entry = tree_search_offset(ctl, offset, 0, 1);
2415 if (!entry)
2416 return -ENOSPC;
2417
2418 /*
2419 * We don't want bitmaps, so just move along until we find a normal
2420 * extent entry.
2421 */
2422 while (entry->bitmap || entry->bytes < min_bytes) {
2423 if (entry->bitmap && list_empty(&entry->list))
2424 list_add_tail(&entry->list, bitmaps);
2425 node = rb_next(&entry->offset_index);
2426 if (!node)
2427 return -ENOSPC;
2428 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2429 }
2430
2431 window_start = entry->offset;
2432 window_free = entry->bytes;
2433 max_extent = entry->bytes;
2434 first = entry;
2435 last = entry;
2436
2437 for (node = rb_next(&entry->offset_index); node;
2438 node = rb_next(&entry->offset_index)) {
2439 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2440
2441 if (entry->bitmap) {
2442 if (list_empty(&entry->list))
2443 list_add_tail(&entry->list, bitmaps);
2444 continue;
2445 }
2446
2447 if (entry->bytes < min_bytes)
2448 continue;
2449
2450 last = entry;
2451 window_free += entry->bytes;
2452 if (entry->bytes > max_extent)
2453 max_extent = entry->bytes;
2454 }
2455
2456 if (window_free < bytes || max_extent < cont1_bytes)
2457 return -ENOSPC;
2458
2459 cluster->window_start = first->offset;
2460
2461 node = &first->offset_index;
2462
2463 /*
2464 * now we've found our entries, pull them out of the free space
2465 * cache and put them into the cluster rbtree
2466 */
2467 do {
2468 int ret;
2469
2470 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2471 node = rb_next(&entry->offset_index);
2472 if (entry->bitmap || entry->bytes < min_bytes)
2473 continue;
2474
2475 rb_erase(&entry->offset_index, &ctl->free_space_offset);
2476 ret = tree_insert_offset(&cluster->root, entry->offset,
2477 &entry->offset_index, 0);
2478 total_size += entry->bytes;
2479 BUG_ON(ret); /* -EEXIST; Logic error */
2480 } while (node && entry != last);
2481
2482 cluster->max_size = max_extent;
2483 trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
2484 return 0;
2485 }
2486
2487 /*
2488 * This specifically looks for bitmaps that may work in the cluster, we assume
2489 * that we have already failed to find extents that will work.
2490 */
2491 static noinline int
2492 setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2493 struct btrfs_free_cluster *cluster,
2494 struct list_head *bitmaps, u64 offset, u64 bytes,
2495 u64 cont1_bytes, u64 min_bytes)
2496 {
2497 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2498 struct btrfs_free_space *entry;
2499 int ret = -ENOSPC;
2500 u64 bitmap_offset = offset_to_bitmap(ctl, offset);
2501
2502 if (ctl->total_bitmaps == 0)
2503 return -ENOSPC;
2504
2505 /*
2506 * The bitmap that covers offset won't be in the list unless offset
2507 * is just its start offset.
2508 */
	if (!list_empty(bitmaps))
		entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
	else
		entry = NULL;
	if (!entry || entry->offset != bitmap_offset) {
		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
		if (entry && list_empty(&entry->list))
			list_add(&entry->list, bitmaps);
	}
2515
2516 list_for_each_entry(entry, bitmaps, list) {
2517 if (entry->bytes < bytes)
2518 continue;
2519 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
2520 bytes, cont1_bytes, min_bytes);
2521 if (!ret)
2522 return 0;
2523 }
2524
2525 /*
2526 * The bitmaps list has all the bitmaps that record free space
2527 * starting after offset, so no more search is required.
2528 */
2529 return -ENOSPC;
2530 }
2531
2532 /*
2533 * here we try to find a cluster of blocks in a block group. The goal
2534 * is to find at least bytes+empty_size.
2535 * We might not find them all in one contiguous area.
2536 *
2537 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -ENOSPC
2539 */
2540 int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2541 struct btrfs_root *root,
2542 struct btrfs_block_group_cache *block_group,
2543 struct btrfs_free_cluster *cluster,
2544 u64 offset, u64 bytes, u64 empty_size)
2545 {
2546 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2547 struct btrfs_free_space *entry, *tmp;
2548 LIST_HEAD(bitmaps);
2549 u64 min_bytes;
2550 u64 cont1_bytes;
2551 int ret;
2552
	/*
	 * Choose the minimum extent size we'll require for this
	 * cluster.  For SSD_SPREAD, don't allow any fragmentation.
	 * For metadata, allow allocations with smaller extents.
	 * For data, keep it dense.
	 */
2559 if (btrfs_test_opt(root, SSD_SPREAD)) {
2560 cont1_bytes = min_bytes = bytes + empty_size;
2561 } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
2562 cont1_bytes = bytes;
2563 min_bytes = block_group->sectorsize;
2564 } else {
2565 cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
2566 min_bytes = block_group->sectorsize;
2567 }
2568
2569 spin_lock(&ctl->tree_lock);
2570
2571 /*
	 * If we know we don't have enough space to make a cluster, don't even
2573 * bother doing all the work to try and find one.
2574 */
2575 if (ctl->free_space < bytes) {
2576 spin_unlock(&ctl->tree_lock);
2577 return -ENOSPC;
2578 }
2579
2580 spin_lock(&cluster->lock);
2581
2582 /* someone already found a cluster, hooray */
2583 if (cluster->block_group) {
2584 ret = 0;
2585 goto out;
2586 }
2587
2588 trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
2589 min_bytes);
2590
2591 INIT_LIST_HEAD(&bitmaps);
2592 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
2593 bytes + empty_size,
2594 cont1_bytes, min_bytes);
2595 if (ret)
2596 ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
2597 offset, bytes + empty_size,
2598 cont1_bytes, min_bytes);
2599
2600 /* Clear our temporary list */
2601 list_for_each_entry_safe(entry, tmp, &bitmaps, list)
2602 list_del_init(&entry->list);
2603
2604 if (!ret) {
2605 atomic_inc(&block_group->count);
2606 list_add_tail(&cluster->block_group_list,
2607 &block_group->cluster_list);
2608 cluster->block_group = block_group;
2609 } else {
2610 trace_btrfs_failed_cluster_setup(block_group);
2611 }
2612 out:
2613 spin_unlock(&cluster->lock);
2614 spin_unlock(&ctl->tree_lock);
2615
2616 return ret;
2617 }
2618
2619 /*
2620 * simple code to zero out a cluster
2621 */
2622 void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
2623 {
2624 spin_lock_init(&cluster->lock);
2625 spin_lock_init(&cluster->refill_lock);
2626 cluster->root = RB_ROOT;
2627 cluster->max_size = 0;
2628 INIT_LIST_HEAD(&cluster->block_group_list);
2629 cluster->block_group = NULL;
2630 }
2631
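/*
 * Discard the range [@start, @start + @bytes) on disk and give the
 * originally reserved range back to the free space cache.  The space
 * is accounted as reserved while the discard runs so that nobody can
 * allocate from it in the meantime.
 */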
2632 static int do_trimming(struct btrfs_block_group_cache *block_group,
2633 u64 *total_trimmed, u64 start, u64 bytes,
2634 u64 reserved_start, u64 reserved_bytes)
2635 {
2636 struct btrfs_space_info *space_info = block_group->space_info;
2637 struct btrfs_fs_info *fs_info = block_group->fs_info;
2638 int ret;
2639 int update = 0;
2640 u64 trimmed = 0;
2641
2642 spin_lock(&space_info->lock);
2643 spin_lock(&block_group->lock);
2644 if (!block_group->ro) {
2645 block_group->reserved += reserved_bytes;
2646 space_info->bytes_reserved += reserved_bytes;
2647 update = 1;
2648 }
2649 spin_unlock(&block_group->lock);
2650 spin_unlock(&space_info->lock);
2651
2652 ret = btrfs_error_discard_extent(fs_info->extent_root,
2653 start, bytes, &trimmed);
2654 if (!ret)
2655 *total_trimmed += trimmed;
2656
2657 btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
2658
2659 if (update) {
2660 spin_lock(&space_info->lock);
2661 spin_lock(&block_group->lock);
2662 if (block_group->ro)
2663 space_info->bytes_readonly += reserved_bytes;
2664 block_group->reserved -= reserved_bytes;
2665 space_info->bytes_reserved -= reserved_bytes;
2666 spin_unlock(&space_info->lock);
2667 spin_unlock(&block_group->lock);
2668 }
2669
2670 return ret;
2671 }
2672
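/*
 * Discard free space tracked by regular extent entries in
 * [@start, @end), skipping bitmap entries.  Each entry is pulled out
 * of the cache while it is being trimmed and re-added afterwards.
 */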
2673 static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
2674 u64 *total_trimmed, u64 start, u64 end, u64 minlen)
2675 {
2676 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2677 struct btrfs_free_space *entry;
2678 struct rb_node *node;
2679 int ret = 0;
2680 u64 extent_start;
2681 u64 extent_bytes;
2682 u64 bytes;
2683
2684 while (start < end) {
2685 spin_lock(&ctl->tree_lock);
2686
2687 if (ctl->free_space < minlen) {
2688 spin_unlock(&ctl->tree_lock);
2689 break;
2690 }
2691
2692 entry = tree_search_offset(ctl, start, 0, 1);
2693 if (!entry) {
2694 spin_unlock(&ctl->tree_lock);
2695 break;
2696 }
2697
2698 /* skip bitmaps */
2699 while (entry->bitmap) {
2700 node = rb_next(&entry->offset_index);
2701 if (!node) {
2702 spin_unlock(&ctl->tree_lock);
2703 goto out;
2704 }
2705 entry = rb_entry(node, struct btrfs_free_space,
2706 offset_index);
2707 }
2708
2709 if (entry->offset >= end) {
2710 spin_unlock(&ctl->tree_lock);
2711 break;
2712 }
2713
2714 extent_start = entry->offset;
2715 extent_bytes = entry->bytes;
2716 start = max(start, extent_start);
2717 bytes = min(extent_start + extent_bytes, end) - start;
2718 if (bytes < minlen) {
2719 spin_unlock(&ctl->tree_lock);
2720 goto next;
2721 }
2722
2723 unlink_free_space(ctl, entry);
2724 kmem_cache_free(btrfs_free_space_cachep, entry);
2725
2726 spin_unlock(&ctl->tree_lock);
2727
2728 ret = do_trimming(block_group, total_trimmed, start, bytes,
2729 extent_start, extent_bytes);
2730 if (ret)
2731 break;
2732 next:
2733 start += bytes;
2734
2735 if (fatal_signal_pending(current)) {
2736 ret = -ERESTARTSYS;
2737 break;
2738 }
2739
2740 cond_resched();
2741 }
2742 out:
2743 return ret;
2744 }
2745
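/*
 * Discard free space tracked by bitmap entries in [@start, @end), one
 * run of set bits at a time.  Runs shorter than @minlen are left
 * alone.
 */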
2746 static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
2747 u64 *total_trimmed, u64 start, u64 end, u64 minlen)
2748 {
2749 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2750 struct btrfs_free_space *entry;
2751 int ret = 0;
2752 int ret2;
2753 u64 bytes;
2754 u64 offset = offset_to_bitmap(ctl, start);
2755
2756 while (offset < end) {
2757 bool next_bitmap = false;
2758
2759 spin_lock(&ctl->tree_lock);
2760
2761 if (ctl->free_space < minlen) {
2762 spin_unlock(&ctl->tree_lock);
2763 break;
2764 }
2765
2766 entry = tree_search_offset(ctl, offset, 1, 0);
2767 if (!entry) {
2768 spin_unlock(&ctl->tree_lock);
2769 next_bitmap = true;
2770 goto next;
2771 }
2772
2773 bytes = minlen;
2774 ret2 = search_bitmap(ctl, entry, &start, &bytes);
2775 if (ret2 || start >= end) {
2776 spin_unlock(&ctl->tree_lock);
2777 next_bitmap = true;
2778 goto next;
2779 }
2780
2781 bytes = min(bytes, end - start);
2782 if (bytes < minlen) {
2783 spin_unlock(&ctl->tree_lock);
2784 goto next;
2785 }
2786
2787 bitmap_clear_bits(ctl, entry, start, bytes);
2788 if (entry->bytes == 0)
2789 free_bitmap(ctl, entry);
2790
2791 spin_unlock(&ctl->tree_lock);
2792
2793 ret = do_trimming(block_group, total_trimmed, start, bytes,
2794 start, bytes);
2795 if (ret)
2796 break;
2797 next:
2798 if (next_bitmap) {
2799 offset += BITS_PER_BITMAP * ctl->unit;
2800 } else {
2801 start += bytes;
2802 if (start >= offset + BITS_PER_BITMAP * ctl->unit)
2803 offset += BITS_PER_BITMAP * ctl->unit;
2804 }
2805
2806 if (fatal_signal_pending(current)) {
2807 ret = -ERESTARTSYS;
2808 break;
2809 }
2810
2811 cond_resched();
2812 }
2813
2814 return ret;
2815 }
2816
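/*
 * Discard all free space in [@start, @end) that is at least @minlen
 * bytes long, extents first and then bitmaps.  On return *@trimmed
 * holds the number of bytes actually discarded.  This is typically
 * driven per block group by the FITRIM ioctl path.
 */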
2817 int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2818 u64 *trimmed, u64 start, u64 end, u64 minlen)
2819 {
2820 int ret;
2821
2822 *trimmed = 0;
2823
2824 ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
2825 if (ret)
2826 return ret;
2827
2828 ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
2829
2830 return ret;
2831 }
2832
2833 /*
2834 * Find the left-most item in the cache tree, and then return the
2835 * smallest inode number in the item.
2836 *
2837 * Note: the returned inode number may not be the smallest one in
2838 * the tree, if the left-most item is a bitmap.
2839 */
2840 u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
2841 {
2842 struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
2843 struct btrfs_free_space *entry = NULL;
2844 u64 ino = 0;
2845
2846 spin_lock(&ctl->tree_lock);
2847
2848 if (RB_EMPTY_ROOT(&ctl->free_space_offset))
2849 goto out;
2850
2851 entry = rb_entry(rb_first(&ctl->free_space_offset),
2852 struct btrfs_free_space, offset_index);
2853
2854 if (!entry->bitmap) {
2855 ino = entry->offset;
2856
2857 unlink_free_space(ctl, entry);
2858 entry->offset++;
2859 entry->bytes--;
2860 if (!entry->bytes)
2861 kmem_cache_free(btrfs_free_space_cachep, entry);
2862 else
2863 link_free_space(ctl, entry);
2864 } else {
2865 u64 offset = 0;
2866 u64 count = 1;
2867 int ret;
2868
2869 ret = search_bitmap(ctl, entry, &offset, &count);
		/* Logic error; should be empty if it can't find anything */
2871 BUG_ON(ret);
2872
2873 ino = offset;
2874 bitmap_clear_bits(ctl, entry, offset, 1);
2875 if (entry->bytes == 0)
2876 free_bitmap(ctl, entry);
2877 }
2878 out:
2879 spin_unlock(&ctl->tree_lock);
2880
2881 return ino;
2882 }
2883
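/*
 * Grab the cached inode for the free ino cache if we have one,
 * otherwise look it up on disk.  The result is stashed in the root
 * unless the filesystem is being unmounted.
 */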
2884 struct inode *lookup_free_ino_inode(struct btrfs_root *root,
2885 struct btrfs_path *path)
2886 {
2887 struct inode *inode = NULL;
2888
2889 spin_lock(&root->cache_lock);
2890 if (root->cache_inode)
2891 inode = igrab(root->cache_inode);
2892 spin_unlock(&root->cache_lock);
2893 if (inode)
2894 return inode;
2895
2896 inode = __lookup_free_space_inode(root, path, 0);
2897 if (IS_ERR(inode))
2898 return inode;
2899
2900 spin_lock(&root->cache_lock);
2901 if (!btrfs_fs_closing(root->fs_info))
2902 root->cache_inode = igrab(inode);
2903 spin_unlock(&root->cache_lock);
2904
2905 return inode;
2906 }
2907
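/* Create the on-disk inode that backs this root's free ino cache. */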
2908 int create_free_ino_inode(struct btrfs_root *root,
2909 struct btrfs_trans_handle *trans,
2910 struct btrfs_path *path)
2911 {
2912 return __create_free_space_inode(root, trans, path,
2913 BTRFS_FREE_INO_OBJECTID, 0);
2914 }
2915
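/*
 * Populate @root's free_ino_ctl from the on-disk free ino cache.  The
 * cache is ignored if its generation doesn't match the root generation
 * or if the INODE_MAP_CACHE option is off.
 */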
2916 int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2917 {
2918 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
2919 struct btrfs_path *path;
2920 struct inode *inode;
2921 int ret = 0;
2922 u64 root_gen = btrfs_root_generation(&root->root_item);
2923
2924 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2925 return 0;
2926
2927 /*
2928 * If we're unmounting then just return, since this does a search on the
2929 * normal root and not the commit root and we could deadlock.
2930 */
2931 if (btrfs_fs_closing(fs_info))
2932 return 0;
2933
2934 path = btrfs_alloc_path();
2935 if (!path)
2936 return 0;
2937
2938 inode = lookup_free_ino_inode(root, path);
2939 if (IS_ERR(inode))
2940 goto out;
2941
2942 if (root_gen != BTRFS_I(inode)->generation)
2943 goto out_put;
2944
2945 ret = __load_free_space_cache(root, inode, ctl, path, 0);
2946
2947 if (ret < 0)
2948 printk(KERN_ERR "btrfs: failed to load free ino cache for "
2949 "root %llu\n", root->root_key.objectid);
2950 out_put:
2951 iput(inode);
2952 out:
2953 btrfs_free_path(path);
2954 return ret;
2955 }
2956
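/*
 * Write this root's in-memory free ino cache back to its inode.
 * Failure is tolerated; the cache just won't be usable on the next
 * mount and will be rebuilt in memory instead.
 */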
2957 int btrfs_write_out_ino_cache(struct btrfs_root *root,
2958 struct btrfs_trans_handle *trans,
2959 struct btrfs_path *path)
2960 {
2961 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
2962 struct inode *inode;
2963 int ret;
2964
2965 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2966 return 0;
2967
2968 inode = lookup_free_ino_inode(root, path);
2969 if (IS_ERR(inode))
2970 return 0;
2971
2972 ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
2973 if (ret) {
2974 btrfs_delalloc_release_metadata(inode, inode->i_size);
2975 #ifdef DEBUG
2976 printk(KERN_ERR "btrfs: failed to write free ino cache "
2977 "for root %llu\n", root->root_key.objectid);
2978 #endif
2979 }
2980
2981 iput(inode);
2982 return ret;
2983 }