/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"
#include "volumes.h"
#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
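
/*
 * A worked example of these constants (assuming 4KiB pages and a 4KiB
 * sectorsize, the common case): BITS_PER_BITMAP is 4096 * 8 = 32768 bits,
 * so a single bitmap page can track 32768 * 4KiB = 128MiB of space, and
 * MAX_CACHE_BYTES_PER_GIG is the 32KiB-per-GiB memory budget that
 * recalculate_thresholds() below enforces when deciding between extent
 * entries and bitmaps.
 */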
struct btrfs_trim_range {
	u64 start;
	u64 bytes;
	struct list_head list;
};
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info);
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (IS_ERR(inode))
		return ERR_PTR(-ENOENT);

	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_mask(inode->i_mapping) &
			~(GFP_NOFS & ~__GFP_HIGHMEM));

	return inode;
}
struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		btrfs_info(root->fs_info,
			"Old style space inode found, converting.");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}
static int __create_free_space_inode(struct btrfs_root *root,
				     struct btrfs_trans_handle *trans,
				     struct btrfs_path *path,
				     u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	/* We inline crc's for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}
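
/*
 * Note on __create_free_space_inode() above: the cache inode is a plain
 * S_IFREG 0600 file with NOCOMPRESS|PREALLOC set, and (except for the
 * free-ino cache, whose crcs are inlined in the file itself) it is also
 * NODATASUM|NODATACOW, so writing the cache never has to allocate
 * checksum items or COW data extents of its own.
 */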
int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(root, trans, path, ino,
					 block_group->key.objectid);
}
int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
		btrfs_calc_trans_metadata_size(root, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_block_group_cache *block_group,
				    struct inode *inode)
{
	int ret = 0;
	struct btrfs_path *path = btrfs_alloc_path();

	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}

	if (block_group) {
		mutex_lock(&trans->transaction->cache_write_mutex);
		if (!list_empty(&block_group->io_list)) {
			list_del_init(&block_group->io_list);

			btrfs_wait_cache_io(root, trans, block_group,
					    &block_group->io_ctl, path,
					    block_group->key.objectid);
			btrfs_put_block_group(block_group);
		}

		/*
		 * now that we've truncated the cache away, it's no longer
		 * setup or written
		 */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
	}
	btrfs_free_path(path);

	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 * We don't need to check for -EAGAIN because we're a free space
	 * cache inode.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret) {
		if (block_group)
			mutex_unlock(&trans->transaction->cache_write_mutex);
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	ret = btrfs_update_inode(trans, root, inode);

	if (block_group)
		mutex_unlock(&trans->transaction->cache_write_mutex);

fail:
	if (ret)
		btrfs_abort_transaction(trans, root, ret);

	return ret;
}
static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);

	return 0;
}
static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
		       struct btrfs_root *root, int write)
{
	int num_pages;
	int check_crcs = 0;

	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);

	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
		check_crcs = 1;

	/* Make sure we can fit our crcs into the first page */
	if (write && check_crcs &&
	    (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
		return -ENOSPC;

	memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));

	io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;

	io_ctl->num_pages = num_pages;
	io_ctl->root = root;
	io_ctl->check_crcs = check_crcs;
	io_ctl->inode = inode;

	return 0;
}
static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
	io_ctl->pages = NULL;
}
static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
{
	io_ctl->cur = NULL;
	io_ctl->orig = NULL;
}
static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
{
	ASSERT(io_ctl->index < io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = page_address(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_CACHE_SIZE;
	if (clear)
		memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
}
static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			ClearPageChecked(io_ctl->pages[i]);
			unlock_page(io_ctl->pages[i]);
			page_cache_release(io_ctl->pages[i]);
		}
	}
}
static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					   "error reading free space cache");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++) {
		clear_page_dirty_for_io(io_ctl->pages[i]);
		set_page_extent_mapped(io_ctl->pages[i]);
	}

	return 0;
}
static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas.  If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}
static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		printk_ratelimited(KERN_ERR "BTRFS: space cache generation (%Lu) does not match inode (%Lu)\n",
				   *gen, generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}
static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
}
static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	if (val != crc) {
		printk_ratelimited(KERN_ERR "BTRFS: csum mismatch on free space cache\n");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}
static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}
static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}
static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}
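
/*
 * For reference, the write path (__btrfs_write_out_cache below) drives
 * the helpers above roughly in this order:
 *
 *	io_ctl_init(io_ctl, inode, root, 1);
 *	io_ctl_prepare_pages(io_ctl, inode, 0);
 *	io_ctl_set_generation(io_ctl, trans->transid);
 *	io_ctl_add_entry() for each extent entry, then
 *	io_ctl_add_bitmap() for each bitmap (page aligned), and finally
 *	io_ctl_zero_remaining_pages(io_ctl);
 */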
static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
			    struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}
static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
	io_ctl_unmap_page(io_ctl);

	return 0;
}
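
/*
 * The read path (__load_free_space_cache below) mirrors the write path:
 * io_ctl_init() and io_ctl_prepare_pages() with uptodate == 1, then
 * io_ctl_check_crc()/io_ctl_check_generation() on the first page, then
 * io_ctl_read_entry() for each record and io_ctl_read_bitmap() for each
 * queued bitmap payload.
 */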
/*
 * Since we attach pinned extents after the fact we can have contiguous sections
 * of free space that are split up in entries.  This poses a problem with the
 * tree logging stuff since it could have allocated across what appears to be 2
 * entries since we would have merged the entries when adding the pinned extents
 * back to the free space cache.  So run through the space cache that we just
 * loaded and merge contiguous entries.  This will make the log replay stuff not
 * blow up and it will make for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;

again:
	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!prev)
			goto next;
		if (e->bitmap || prev->bitmap)
			goto next;
		if (prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);
			prev = NULL;
			spin_unlock(&ctl->tree_lock);
			goto again;
		}
next:
		prev = e;
	}
	spin_unlock(&ctl->tree_lock);
}
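
/*
 * Example of what merge_space_tree() fixes up (hypothetical offsets): if
 * the cache was written with an extent entry [0, 4096) and the pinned
 * extents later re-added [4096, 8192), loading produces two adjacent
 * extent entries; the pass above collapses them into one [0, 8192) entry
 * so log replay sees the same contiguous region the allocator originally
 * handed out.
 */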
static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct btrfs_io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	LIST_HEAD(bitmaps);
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (!BTRFS_I(inode)->generation) {
		btrfs_info(root->fs_info,
			   "The free space cache file (%llu) is invalid, skipping it",
			   offset);
		return 0;
	}

	if (BTRFS_I(inode)->generation != generation) {
		btrfs_err(root->fs_info,
			"free space inode generation (%llu) did not match free space cache generation (%llu)",
			BTRFS_I(inode)->generation, generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, root, 0);
	if (ret)
		return ret;

	ret = readahead_cache(inode);
	if (ret)
		goto out;

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(root->fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			ASSERT(num_bitmaps);
			num_bitmaps--;
			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(root->fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * We add the bitmaps at the end of the entries in order that
	 * the bitmap entries are added to the cache.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}
int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		btrfs_warn(fs_info, "block group %llu has wrong amount of free space",
			block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now",
			block_group->key.objectid);
	}

	iput(inode);
	return ret;
}
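
/*
 * The "matched" check above is simple accounting: for a (hypothetical)
 * 1GiB block group with 300MiB allocated (used) and 8MiB excluded for
 * superblock mirrors (bytes_super), the loaded cache must describe
 * exactly 1GiB - 300MiB - 8MiB = 716MiB of free space, or it is
 * discarded and rebuilt from the extent tree.
 */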
static noinline_for_stack
int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space_ctl *ctl,
			      struct btrfs_block_group_cache *block_group,
			      int *entries, int *bitmaps,
			      struct list_head *bitmap_list)
{
	int ret;
	struct btrfs_free_cluster *cluster = NULL;
	struct btrfs_free_cluster *cluster_locked = NULL;
	struct rb_node *node = rb_first(&ctl->free_space_offset);
	struct btrfs_trim_range *trim_entry;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
	}

	if (!node && cluster) {
		cluster_locked = cluster;
		spin_lock(&cluster_locked->lock);
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		*entries += 1;

		ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto fail;

		if (e->bitmap) {
			list_add_tail(&e->list, bitmap_list);
			*bitmaps += 1;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster_locked = cluster;
			spin_lock(&cluster_locked->lock);
			cluster = NULL;
		}
	}
	if (cluster_locked) {
		spin_unlock(&cluster_locked->lock);
		cluster_locked = NULL;
	}

	/*
	 * Make sure we don't miss any range that was removed from our rbtree
	 * because trimming is running.  Otherwise after a umount+mount (or crash
	 * after committing the transaction) we would leak free space and get
	 * an inconsistent free space cache report from fsck.
	 */
	list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
		ret = io_ctl_add_entry(io_ctl, trim_entry->start,
				       trim_entry->bytes, NULL);
		if (ret)
			goto fail;
		*entries += 1;
	}

	return 0;
fail:
	if (cluster_locked)
		spin_unlock(&cluster_locked->lock);
	return -ENOSPC;
}
static noinline_for_stack int
update_cache_item(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct inode *inode,
		  struct btrfs_path *path, u64 offset,
		  int entries, int bitmaps)
{
	struct btrfs_key key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);
		goto fail;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		ASSERT(path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
					 NULL, GFP_NOFS);
			btrfs_release_path(path);
			goto fail;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;

fail:
	return -1;
}
static noinline_for_stack int
write_pinned_extent_entries(struct btrfs_root *root,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_io_ctl *io_ctl,
			    int *entries)
{
	u64 start, extent_start, extent_end, len;
	struct extent_io_tree *unpin = NULL;
	int ret;

	if (!block_group)
		return 0;

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space.
	 *
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one.
	 */
	unpin = root->fs_info->pinned_extents;

	start = block_group->key.objectid;

	while (start < block_group->key.objectid + block_group->key.offset) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			return 0;

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->key.objectid +
		    block_group->key.offset)
			return 0;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->key.objectid +
				 block_group->key.offset, extent_end + 1);
		len = extent_end - extent_start;

		*entries += 1;
		ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
		if (ret)
			return -ENOSPC;

		start = extent_end;
	}

	return 0;
}
static noinline_for_stack int
write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
{
	struct list_head *pos, *n;
	int ret;

	/* Write out the bitmaps */
	list_for_each_safe(pos, n, bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);

		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
		if (ret)
			return -ENOSPC;
		list_del_init(&entry->list);
	}

	return 0;
}
static int flush_dirty_cache(struct inode *inode)
{
	int ret;

	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);

	return ret;
}
static void noinline_for_stack
cleanup_write_cache_enospc(struct inode *inode,
			   struct btrfs_io_ctl *io_ctl,
			   struct extent_state **cached_state,
			   struct list_head *bitmap_list)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);
		list_del_init(&entry->list);
	}
	io_ctl_drop_pages(io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, cached_state,
			     GFP_NOFS);
}
int btrfs_wait_cache_io(struct btrfs_root *root,
			struct btrfs_trans_handle *trans,
			struct btrfs_block_group_cache *block_group,
			struct btrfs_io_ctl *io_ctl,
			struct btrfs_path *path, u64 offset)
{
	int ret;
	struct inode *inode = io_ctl->inode;

	if (!inode)
		return 0;

	if (block_group)
		root = root->fs_info->tree_root;

	/* Flush the dirty pages in the cache file. */
	ret = flush_dirty_cache(inode);
	if (ret)
		goto out;

	/* Update the cache item to tell everyone this cache file is valid. */
	ret = update_cache_item(trans, root, inode, path, offset,
				io_ctl->entries, io_ctl->bitmaps);
out:
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		if (block_group)
			btrfs_err(root->fs_info,
				"failed to write free space cache for block group %llu",
				block_group->key.objectid);
	}
	btrfs_update_inode(trans, root, inode);

	if (block_group) {
		/* the dirty list is protected by the dirty_bgs_lock */
		spin_lock(&trans->transaction->dirty_bgs_lock);

		/* the disk_cache_state is protected by the block group lock */
		spin_lock(&block_group->lock);

		/*
		 * only mark this as written if we didn't get put back on
		 * the dirty list while waiting for IO.  Otherwise our
		 * cache state won't be right, and we won't get written again.
		 */
		if (!ret && list_empty(&block_group->dirty_list))
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		else if (ret)
			block_group->disk_cache_state = BTRFS_DC_ERROR;

		spin_unlock(&block_group->lock);
		spin_unlock(&trans->transaction->dirty_bgs_lock);
		io_ctl->inode = NULL;
		iput(inode);
	}

	return ret;
}
/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @trans - the trans handle
 * @path - the path to use
 * @offset - the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick recovery
 * on mount.  This will return 0 if it was successful in writing the cache out,
 * and -1 if it was not.
 */
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_io_ctl *io_ctl,
				   struct btrfs_trans_handle *trans,
				   struct btrfs_path *path, u64 offset)
{
	struct extent_state *cached_state = NULL;
	LIST_HEAD(bitmap_list);
	int entries = 0;
	int bitmaps = 0;
	int ret;

	if (!i_size_read(inode))
		return -EIO;

	WARN_ON(io_ctl->pages);
	ret = io_ctl_init(io_ctl, inode, root, 1);
	if (ret)
		return ret;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
		down_write(&block_group->data_rwsem);
		spin_lock(&block_group->lock);
		if (block_group->delalloc_bytes) {
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
			spin_unlock(&block_group->lock);
			up_write(&block_group->data_rwsem);
			BTRFS_I(inode)->generation = 0;
			ret = 0;
			goto out;
		}
		spin_unlock(&block_group->lock);
	}

	/* Lock all pages first so we can lock the extent safely. */
	io_ctl_prepare_pages(io_ctl, inode, 0);

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 0, &cached_state);

	io_ctl_set_generation(io_ctl, trans->transid);

	mutex_lock(&ctl->cache_writeout_mutex);
	/* Write out the extent entries in the free space cache */
	spin_lock(&ctl->tree_lock);
	ret = write_cache_extent_entries(io_ctl, ctl,
					 block_group, &entries, &bitmaps,
					 &bitmap_list);
	spin_unlock(&ctl->tree_lock);
	if (ret) {
		mutex_unlock(&ctl->cache_writeout_mutex);
		goto out_nospc;
	}

	/*
	 * Some spaces that are freed in the current transaction are pinned,
	 * they will be added into free space cache after the transaction is
	 * committed, we shouldn't lose them.
	 *
	 * If this changes while we are working we'll get added back to
	 * the dirty list and redo it.  No locking needed.
	 */
	ret = write_pinned_extent_entries(root, block_group, io_ctl, &entries);
	if (ret) {
		mutex_unlock(&ctl->cache_writeout_mutex);
		goto out_nospc;
	}

	/*
	 * At last, we write out all the bitmaps and keep cache_writeout_mutex
	 * locked while doing it because a concurrent trim can be manipulating
	 * or freeing the bitmap.
	 */
	spin_lock(&ctl->tree_lock);
	ret = write_bitmap_entries(io_ctl, &bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);
	if (ret)
		goto out_nospc;

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(io_ctl);

	/* Everything is written out, now we dirty the pages in the file. */
	ret = btrfs_dirty_pages(root, inode, io_ctl->pages, io_ctl->num_pages,
				0, i_size_read(inode), &cached_state);
	if (ret)
		goto out_nospc;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);
	/*
	 * Release the pages and unlock the extent, we will flush
	 * them out later.
	 */
	io_ctl_drop_pages(io_ctl);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	/*
	 * at this point the pages are under IO and we're happy,
	 * The caller is responsible for waiting on them and updating
	 * the cache and the inode.
	 */
	io_ctl->entries = entries;
	io_ctl->bitmaps = bitmaps;

	ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
	if (ret)
		goto out;

	return 0;

out:
	io_ctl->inode = NULL;
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	return ret;

out_nospc:
	cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list);

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);

	goto out;
}
int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	root = root->fs_info->tree_root;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group,
				      &block_group->io_ctl, trans,
				      path, block_group->key.objectid);
	if (ret) {
		btrfs_err(root->fs_info,
			"failed to write free space cache for block group %llu",
			block_group->key.objectid);

		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);

		block_group->io_ctl.inode = NULL;
		iput(inode);
	}

	/*
	 * if ret == 0 the caller is expected to call btrfs_wait_cache_io
	 * to wait for IO and put the inode
	 */

	return ret;
}
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	ASSERT(offset >= bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u32 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}
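
/*
 * Worked example for offset_to_bitmap() (hypothetical numbers): with a
 * 4KiB unit, bytes_per_bitmap is 32768 * 4KiB = 128MiB.  For a block
 * group starting at ctl->start = 1GiB and offset = 1GiB + 200MiB, the
 * relative offset of 200MiB rounds down to 128MiB, so the covering
 * bitmap starts at 1GiB + 128MiB.
 */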
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap)
					return -EEXIST;
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap)
					return -EEXIST;
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}
/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = rb_prev(&entry->offset_index);
			if (n) {
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap &&
				    prev->offset + prev->bytes > offset)
					entry = prev;
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			ASSERT(entry->offset <= offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap &&
			    prev->offset + prev->bytes > offset)
				return prev;
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}
static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
		    struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	ASSERT(info->bytes || info->bitmap);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u32 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
	u32 max_bitmaps = div_u64(size + bytes_per_bg - 1, bytes_per_bg);

	max_bitmaps = max_t(u32, max_bitmaps, 1);

	ASSERT(ctl->total_bitmaps <= max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < 1024 * 1024 * 1024)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG *
			div_u64(size, 1024 * 1024 * 1024);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);

	ctl->extents_thresh =
		div_u64(extent_bytes, sizeof(struct btrfs_free_space));
}
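
/*
 * A quick worked example of the math above (hypothetical numbers, 4KiB
 * pages): for a 2GiB block group, max_bytes = 32KiB * 2 = 64KiB.  With 3
 * bitmaps in use, bitmap_bytes = (3 + 1) * 4KiB = 16KiB, leaving
 * extent_bytes = min(64KiB - 16KiB, 64KiB / 2) = 32KiB, so extents_thresh
 * is 32KiB divided by sizeof(struct btrfs_free_space) entries.
 */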
static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}
/*
 * If we can not find a suitable extent, we will use bytes to record
 * the size of the max extent.
 */
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
{
	unsigned long found_bits = 0;
	unsigned long max_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;
	unsigned long extent_bits;

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		extent_bits = next_zero - i;
		if (extent_bits >= bits) {
			found_bits = extent_bits;
			break;
		} else if (extent_bits > max_bits) {
			max_bits = extent_bits;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	*bytes = (u64)(max_bits) * ctl->unit;
	return -1;
}
/* Cache the size of the max extent in bytes */
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
		unsigned long align, u64 *max_extent_size)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	u64 tmp;
	u64 align_off;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		goto out;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		goto out;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes) {
			if (entry->bytes > *max_extent_size)
				*max_extent_size = entry->bytes;
			continue;
		}

		/* make sure the space returned is big enough
		 * to match our requested alignment
		 */
		if (*bytes >= align) {
			tmp = entry->offset - ctl->start + align - 1;
			tmp = div64_u64(tmp, align);
			tmp = tmp * align + ctl->start;
			align_off = tmp - entry->offset;
		} else {
			align_off = 0;
			tmp = entry->offset;
		}

		if (entry->bytes < *bytes + align_off) {
			if (entry->bytes > *max_extent_size)
				*max_extent_size = entry->bytes;
			continue;
		}

		if (entry->bitmap) {
			u64 size = *bytes;

			ret = search_bitmap(ctl, entry, &tmp, &size);
			if (!ret) {
				*offset = tmp;
				*bytes = size;
				return entry;
			} else if (size > *max_extent_size) {
				*max_extent_size = size;
			}
			continue;
		}

		*offset = tmp;
		*bytes = entry->bytes - align_off;
		return entry;
	}
out:
	return NULL;
}
static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	INIT_LIST_HEAD(&info->list);
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}
static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *bitmap_info,
			      u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * We need to search for bits in this bitmap.  We could only cover some
	 * of the extent in this bitmap thanks to how we add space, so we need
	 * to search for as much of it as we can and clear that amount, and then
	 * go searching for the next bit.
	 */
	search_start = *offset;
	search_bytes = ctl->unit;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
	if (ret < 0 || search_start != *offset)
		return -EINVAL;

	/* We may have found more bits than what we need */
	search_bytes = min(search_bytes, *bytes);

	/* Cannot clear past the end of the bitmap */
	search_bytes = min(search_bytes, end - search_start + 1);

	bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
	*offset += search_bytes;
	*bytes -= search_bytes;

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = ctl->unit;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}
static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes)
{
	u64 bytes_to_set = 0;
	u64 end;

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	return bytes_to_set;
}
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		      struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap.
	 */
	if (ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them for larger extents, however if we have plenty
		 * of cache left then go ahead and add them, no sense in adding
		 * the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= block_group->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * The original block groups from mkfs can be really small, like 8
	 * megabytes, so don't bother with a bitmap for those entries.  However
	 * some block groups can be smaller than what a bitmap would cover but
	 * are still large enough that they could overflow the 32k memory limit,
	 * so allow those block groups to still be allowed to have a bitmap
	 * entry.
	 */
	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
		return false;

	return true;
}
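
/*
 * Concretely (assuming a 4KiB sectorsize): extents of at most 16KiB
 * count as "small" above and stay extent entries while fewer than half
 * the extent slots are used, and a block group smaller than half a
 * bitmap's reach (64MiB with 4KiB units) never gets bitmaps at all.
 */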
static struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};
static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group_cache *block_group = NULL;
	int added = 0;
	u64 bytes, offset, bytes_added;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;
again:
	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so and it has our bitmap we need to add
	 * the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry,
							  offset, bytes);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
		if (!bytes) {
			ret = 1;
			goto out;
		}
	}

no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		ASSERT(added == 0);
		goto new_bitmap;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	added = 0;

	if (!bytes) {
		ret = 1;
		goto out;
	}

	goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}
static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
			  struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}
static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
				     struct btrfs_free_space *info,
				     bool update_stat)
{
	struct btrfs_free_space *bitmap;
	unsigned long i;
	unsigned long j;
	const u64 end = info->offset + info->bytes;
	const u64 bitmap_offset = offset_to_bitmap(ctl, end);
	u64 bytes;

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, end);
	j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
	if (j == i)
		return false;
	bytes = (j - i) * ctl->unit;
	info->bytes += bytes;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, end, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, end, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}
static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       bool update_stat)
{
	struct btrfs_free_space *bitmap;
	u64 bitmap_offset;
	unsigned long i;
	unsigned long j;
	unsigned long prev_j;
	u64 bytes;

	bitmap_offset = offset_to_bitmap(ctl, info->offset);
	/* If we're on a boundary, try the previous logical bitmap. */
	if (bitmap_offset == info->offset) {
		if (info->offset == 0)
			return false;
		bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
	}

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
	j = 0;
	prev_j = (unsigned long)-1;
	for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
		if (j > i)
			break;
		prev_j = j;
	}
	if (prev_j == i)
		return false;

	if (prev_j == (unsigned long)-1)
		bytes = (i + 1) * ctl->unit;
	else
		bytes = (i - prev_j) * ctl->unit;

	info->offset -= bytes;
	info->bytes += bytes;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, info->offset, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}
/*
 * We prefer always to allocate from extent entries, both for clustered and
 * non-clustered allocation requests.  So when attempting to add a new extent
 * entry, try to see if there's adjacent free space in bitmap entries, and if
 * there is, migrate that space from the bitmaps to the extent.
 * Like this we get better chances of satisfying space allocation requests
 * because we attempt to satisfy them based on a single cache entry, and never
 * on 2 or more entries - even if the entries represent a contiguous free space
 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
 * ends).
 */
static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info,
			      bool update_stat)
{
	/*
	 * Only work with disconnected entries, as we can change their offset,
	 * and they must be extent entries.
	 */
	ASSERT(!info->bitmap);
	ASSERT(RB_EMPTY_NODE(&info->offset_index));

	if (ctl->total_bitmaps > 0) {
		bool stole_end;
		bool stole_front = false;

		stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
		if (ctl->total_bitmaps > 0)
			stole_front = steal_from_bitmap_to_front(ctl, info,
								 update_stat);

		if (stole_end || stole_front)
			try_merge_free_space(ctl, info, update_stat);
	}
}
int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;
	RB_CLEAR_NODE(&info->offset_index);

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * There was no extent directly to the left or right of this new
	 * extent, so we know we're going to have to allocate a new extent;
	 * before we do that see if we need to drop this into a bitmap.
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	/*
	 * Only steal free space from adjacent bitmaps if we're sure we're not
	 * going to add the new free space to existing bitmap entries - because
	 * that would mean unnecessary work that would be reverted.  Therefore
	 * attempt to steal space from bitmaps if we're adding an extent entry.
	 */
	steal_from_bitmap(ctl, info, true);

	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		printk(KERN_CRIT "BTRFS: unable to add free space :%d\n", ret);
		ASSERT(ret != -EEXIST);
	}

	return ret;
}
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	int ret;
	bool re_search = false;

	spin_lock(&ctl->tree_lock);

again:
	ret = 0;
	if (!bytes)
		goto out_lock;

	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			/*
			 * If we found a partial bit of our free space in a
			 * bitmap but then couldn't find the other part this may
			 * be a problem, so WARN about it.
			 */
			WARN_ON(re_search);
			goto out_lock;
		}
	}

	re_search = false;
	if (!info->bitmap) {
		unlink_free_space(ctl, info);
		if (offset == info->offset) {
			u64 to_free = min(bytes, info->bytes);

			info->bytes -= to_free;
			info->offset += to_free;
			if (info->bytes)
				ret = link_free_space(ctl, info);
			else
				kmem_cache_free(btrfs_free_space_cachep, info);

			offset += to_free;
			bytes -= to_free;
			goto again;
		} else {
			u64 old_end = info->bytes + info->offset;

			info->bytes = offset - info->offset;
			ret = link_free_space(ctl, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;

			/* Not enough bytes in this entry to satisfy us */
			if (old_end < offset + bytes) {
				bytes -= old_end - offset;
				offset = old_end;
				goto again;
			} else if (old_end == offset + bytes) {
				/* all done */
				goto out_lock;
			}
			spin_unlock(&ctl->tree_lock);

			ret = btrfs_add_free_space(block_group, offset + bytes,
						   old_end - (offset + bytes));
			WARN_ON(ret);
			goto out;
		}
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN) {
		re_search = true;
		goto again;
	}
out_lock:
	spin_unlock(&ctl->tree_lock);
out:
	return ret;
}
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes && !block_group->ro)
			count++;
		btrfs_crit(block_group->fs_info,
			   "entry offset %llu, bytes %llu, bitmap %s",
			   info->offset, info->bytes,
			   (info->bitmap) ? "yes" : "no");
	}
	btrfs_info(block_group->fs_info, "block group has cluster?: %s",
		   list_empty(&block_group->cluster_list) ? "no" : "yes");
	btrfs_info(block_group->fs_info,
		   "%d blocks of free space at or bigger than %llu bytes found",
		   count, bytes);
}
2440 void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
2442 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2444 spin_lock_init(&ctl->tree_lock);
2445 ctl->unit = block_group->sectorsize;
2446 ctl->start = block_group->key.objectid;
2447 ctl->private = block_group;
2448 ctl->op = &free_space_op;
2449 INIT_LIST_HEAD(&ctl->trimming_ranges);
2450 mutex_init(&ctl->cache_writeout_mutex);
2453 * we only want to have 32k of ram per block group for keeping
2454 * track of free space, and if we pass 1/2 of that we want to
2455 * start converting things over to using bitmaps
2457 ctl->extents_thresh = ((1024 * 32) / 2) /
2458 sizeof(struct btrfs_free_space);
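/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * Worked example for the threshold above. The exact
 * sizeof(struct btrfs_free_space) depends on architecture and config; 48
 * bytes is an assumed stand-in.
 */
#if 0
#include <stdio.h>

#define ENTRY_SIZE 48				/* assumed, for illustration */

int main(void)
{
	unsigned long cap = 1024 * 32;		/* 32k of ram per block group */
	unsigned long thresh = (cap / 2) / ENTRY_SIZE;

	/* Prints 341: past roughly that many extent entries, the cache
	 * starts converting free space over to bitmaps. */
	printf("extents_thresh = %lu\n", thresh);
	return 0;
}
#endif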
2462 * for a given cluster, put all of its extents back into the free
2463 * space cache. If the block group passed doesn't match the block group
2464 * pointed to by the cluster, someone else raced in and freed the
2465 * cluster already. In that case, we just return without changing anything
2468 __btrfs_return_cluster_to_free_space(
2469 struct btrfs_block_group_cache *block_group,
2470 struct btrfs_free_cluster *cluster)
2472 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2473 struct btrfs_free_space *entry;
2474 struct rb_node *node;
2476 spin_lock(&cluster->lock);
2477 if (cluster->block_group != block_group)
2480 cluster->block_group = NULL;
2481 cluster->window_start = 0;
2482 list_del_init(&cluster->block_group_list);
2484 node = rb_first(&cluster->root);
2488 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2489 node = rb_next(&entry->offset_index);
2490 rb_erase(&entry->offset_index, &cluster->root);
2491 RB_CLEAR_NODE(&entry->offset_index);
2493 bitmap = (entry->bitmap != NULL);
2495 try_merge_free_space(ctl, entry, false);
2496 steal_from_bitmap(ctl, entry, false);
2498 tree_insert_offset(&ctl->free_space_offset,
2499 entry->offset, &entry->offset_index, bitmap);
2501 cluster->root = RB_ROOT;
2504 spin_unlock(&cluster->lock);
2505 btrfs_put_block_group(block_group);
2509 static void __btrfs_remove_free_space_cache_locked(
2510 struct btrfs_free_space_ctl *ctl)
2512 struct btrfs_free_space *info;
2513 struct rb_node *node;
2515 while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
2516 info = rb_entry(node, struct btrfs_free_space, offset_index);
2517 if (!info->bitmap) {
2518 unlink_free_space(ctl, info);
2519 kmem_cache_free(btrfs_free_space_cachep, info);
2521 free_bitmap(ctl, info);
2524 cond_resched_lock(&ctl->tree_lock);
2528 void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
2530 spin_lock(&ctl->tree_lock);
2531 __btrfs_remove_free_space_cache_locked(ctl);
2532 spin_unlock(&ctl->tree_lock);
2535 void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
2537 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2538 struct btrfs_free_cluster *cluster;
2539 struct list_head *head;
2541 spin_lock(&ctl->tree_lock);
2542 while ((head = block_group->cluster_list.next) !=
2543 &block_group->cluster_list) {
2544 cluster = list_entry(head, struct btrfs_free_cluster,
2547 WARN_ON(cluster->block_group != block_group);
2548 __btrfs_return_cluster_to_free_space(block_group, cluster);
2550 cond_resched_lock(&ctl->tree_lock);
2552 __btrfs_remove_free_space_cache_locked(ctl);
2553 spin_unlock(&ctl->tree_lock);
2557 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
2558 u64 offset, u64 bytes, u64 empty_size,
2559 u64 *max_extent_size)
2561 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2562 struct btrfs_free_space *entry = NULL;
2563 u64 bytes_search = bytes + empty_size;
2566 u64 align_gap_len = 0;
2568 spin_lock(&ctl->tree_lock);
2569 entry = find_free_space(ctl, &offset, &bytes_search,
2570 block_group->full_stripe_len, max_extent_size);
2575 if (entry->bitmap) {
2576 bitmap_clear_bits(ctl, entry, offset, bytes);
2578 free_bitmap(ctl, entry);
2580 unlink_free_space(ctl, entry);
2581 align_gap_len = offset - entry->offset;
2582 align_gap = entry->offset;
2584 entry->offset = offset + bytes;
2585 WARN_ON(entry->bytes < bytes + align_gap_len);
2587 entry->bytes -= bytes + align_gap_len;
2589 kmem_cache_free(btrfs_free_space_cachep, entry);
2591 link_free_space(ctl, entry);
2594 spin_unlock(&ctl->tree_lock);
2597 __btrfs_add_free_space(ctl, align_gap, align_gap_len);
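/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * When the chosen offset is aligned up inside a larger extent entry, the
 * skipped-over gap at the front is handed back to the cache. Made-up numbers:
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void align_gap_example(void)
{
	uint64_t entry_offset = 4096 * 3;	/* extent starts at 12K */
	uint64_t offset = 4096 * 4;		/* allocator aligned up to 16K */
	uint64_t bytes = 4096;			/* 4K actually allocated */

	uint64_t align_gap = entry_offset;		/* re-added start */
	uint64_t align_gap_len = offset - entry_offset;	/* 4K gap re-added */

	/* the surviving entry now starts past the allocation */
	uint64_t new_entry_offset = offset + bytes;

	assert(align_gap_len == 4096);
	assert(new_entry_offset == 4096 * 5);
	(void)align_gap;
}
#endif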
2602 * given a cluster, put all of its extents back into the free space
2603 * cache. If a block group is passed, this function will only free
2604 * a cluster that belongs to the passed block group.
2606 * Otherwise, it'll get a reference on the block group pointed to by the
2607 * cluster and remove the cluster from it.
2609 int btrfs_return_cluster_to_free_space(
2610 struct btrfs_block_group_cache *block_group,
2611 struct btrfs_free_cluster *cluster)
2613 struct btrfs_free_space_ctl *ctl;
2616 /* first, get a safe pointer to the block group */
2617 spin_lock(&cluster->lock);
2619 block_group = cluster->block_group;
2621 spin_unlock(&cluster->lock);
2624 } else if (cluster->block_group != block_group) {
2625 /* someone else has already freed it; don't redo their work */
2626 spin_unlock(&cluster->lock);
2629 atomic_inc(&block_group->count);
2630 spin_unlock(&cluster->lock);
2632 ctl = block_group->free_space_ctl;
2634 /* now return any extents the cluster had on it */
2635 spin_lock(&ctl->tree_lock);
2636 ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
2637 spin_unlock(&ctl->tree_lock);
2639 /* finally drop our ref */
2640 btrfs_put_block_group(block_group);
2644 static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
2645 struct btrfs_free_cluster *cluster,
2646 struct btrfs_free_space *entry,
2647 u64 bytes, u64 min_start,
2648 u64 *max_extent_size)
2650 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2652 u64 search_start = cluster->window_start;
2653 u64 search_bytes = bytes;
2656 search_start = min_start;
2657 search_bytes = bytes;
2659 err = search_bitmap(ctl, entry, &search_start, &search_bytes);
2661 if (search_bytes > *max_extent_size)
2662 *max_extent_size = search_bytes;
2667 __bitmap_clear_bits(ctl, entry, ret, bytes);
2673 * given a cluster, try to allocate 'bytes' from it; returns 0
2674 * if it couldn't find anything suitably large, or a logical disk offset
2675 * if things worked out
2677 u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
2678 struct btrfs_free_cluster *cluster, u64 bytes,
2679 u64 min_start, u64 *max_extent_size)
2681 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2682 struct btrfs_free_space *entry = NULL;
2683 struct rb_node *node;
2686 spin_lock(&cluster->lock);
2687 if (bytes > cluster->max_size)
2690 if (cluster->block_group != block_group)
2693 node = rb_first(&cluster->root);
2697 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2699 if (entry->bytes < bytes && entry->bytes > *max_extent_size)
2700 *max_extent_size = entry->bytes;
2702 if (entry->bytes < bytes ||
2703 (!entry->bitmap && entry->offset < min_start)) {
2704 node = rb_next(&entry->offset_index);
2707 entry = rb_entry(node, struct btrfs_free_space,
2712 if (entry->bitmap) {
2713 ret = btrfs_alloc_from_bitmap(block_group,
2714 cluster, entry, bytes,
2715 cluster->window_start,
2718 node = rb_next(&entry->offset_index);
2721 entry = rb_entry(node, struct btrfs_free_space,
2725 cluster->window_start += bytes;
2727 ret = entry->offset;
2729 entry->offset += bytes;
2730 entry->bytes -= bytes;
2733 if (entry->bytes == 0)
2734 rb_erase(&entry->offset_index, &cluster->root);
2738 spin_unlock(&cluster->lock);
2743 spin_lock(&ctl->tree_lock);
2745 ctl->free_space -= bytes;
2746 if (entry->bytes == 0) {
2747 ctl->free_extents--;
2748 if (entry->bitmap) {
2749 kfree(entry->bitmap);
2750 ctl->total_bitmaps--;
2751 ctl->op->recalc_thresholds(ctl);
2753 kmem_cache_free(btrfs_free_space_cachep, entry);
2756 spin_unlock(&ctl->tree_lock);
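/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * Allocation from a plain (non-bitmap) cluster entry carves bytes off the
 * front of the entry and advances the cluster window, as above. With
 * made-up values:
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void cluster_carve_example(void)
{
	uint64_t entry_offset = 1 << 20;	/* entry covers [1MiB, 1MiB+64K) */
	uint64_t entry_bytes = 64 * 1024;
	uint64_t window_start = 1 << 20;
	uint64_t bytes = 16 * 1024;		/* allocate 16K */

	uint64_t ret = entry_offset;		/* returned logical offset */
	entry_offset += bytes;
	entry_bytes -= bytes;
	window_start += bytes;

	assert(ret == 1 << 20);
	assert(entry_offset == window_start);	/* window tracks the carve */
	assert(entry_bytes == 48 * 1024);
}
#endif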
2761 static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
2762 struct btrfs_free_space *entry,
2763 struct btrfs_free_cluster *cluster,
2764 u64 offset, u64 bytes,
2765 u64 cont1_bytes, u64 min_bytes)
2767 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2768 unsigned long next_zero;
2770 unsigned long want_bits;
2771 unsigned long min_bits;
2772 unsigned long found_bits;
2773 unsigned long start = 0;
2774 unsigned long total_found = 0;
2777 i = offset_to_bit(entry->offset, ctl->unit,
2778 max_t(u64, offset, entry->offset));
2779 want_bits = bytes_to_bits(bytes, ctl->unit);
2780 min_bits = bytes_to_bits(min_bytes, ctl->unit);
2784 for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
2785 next_zero = find_next_zero_bit(entry->bitmap,
2786 BITS_PER_BITMAP, i);
2787 if (next_zero - i >= min_bits) {
2788 found_bits = next_zero - i;
2799 cluster->max_size = 0;
2802 total_found += found_bits;
2804 if (cluster->max_size < found_bits * ctl->unit)
2805 cluster->max_size = found_bits * ctl->unit;
2807 if (total_found < want_bits || cluster->max_size < cont1_bytes) {
2812 cluster->window_start = start * ctl->unit + entry->offset;
2813 rb_erase(&entry->offset_index, &ctl->free_space_offset);
2814 ret = tree_insert_offset(&cluster->root, entry->offset,
2815 &entry->offset_index, 1);
2816 ASSERT(!ret); /* -EEXIST; Logic error */
2818 trace_btrfs_setup_cluster(block_group, cluster,
2819 total_found * ctl->unit, 1);
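/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * The bit/byte conversions used above map between bitmap bit indexes and
 * byte offsets inside one bitmap entry; a plausible user-space rendering
 * (the real helpers live elsewhere in this file and use div_u64):
 */
#if 0
#include <assert.h>
#include <stdint.h>

/* bit index of byte offset 'bytes' in a bitmap starting at bitmap_start */
static uint64_t offset_to_bit_example(uint64_t bitmap_start, uint32_t unit,
				      uint64_t bytes)
{
	return (bytes - bitmap_start) / unit;
}

static uint64_t bytes_to_bits_example(uint64_t bytes, uint32_t unit)
{
	return bytes / unit;		/* plain integer division, as assumed */
}

static void conversions(void)
{
	/* with a 4K unit, byte 12K inside a bitmap starting at 0 is bit 3 */
	assert(offset_to_bit_example(0, 4096, 12288) == 3);
	assert(bytes_to_bits_example(65536, 4096) == 16);
}
#endif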
2824 * This searches the block group for just extents to fill the cluster with.
2825 * Try to find a cluster with at least bytes total bytes, at least one
2826 * extent of cont1_bytes, and other clusters of at least min_bytes.
2829 setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2830 struct btrfs_free_cluster *cluster,
2831 struct list_head *bitmaps, u64 offset, u64 bytes,
2832 u64 cont1_bytes, u64 min_bytes)
2834 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2835 struct btrfs_free_space *first = NULL;
2836 struct btrfs_free_space *entry = NULL;
2837 struct btrfs_free_space *last;
2838 struct rb_node *node;
2843 entry = tree_search_offset(ctl, offset, 0, 1);
2848 * We don't want bitmaps, so just move along until we find a normal
2849 * extent entry.
2851 while (entry->bitmap || entry->bytes < min_bytes) {
2852 if (entry->bitmap && list_empty(&entry->list))
2853 list_add_tail(&entry->list, bitmaps);
2854 node = rb_next(&entry->offset_index);
2857 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2860 window_free = entry->bytes;
2861 max_extent = entry->bytes;
2865 for (node = rb_next(&entry->offset_index); node;
2866 node = rb_next(&entry->offset_index)) {
2867 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2869 if (entry->bitmap) {
2870 if (list_empty(&entry->list))
2871 list_add_tail(&entry->list, bitmaps);
2875 if (entry->bytes < min_bytes)
2879 window_free += entry->bytes;
2880 if (entry->bytes > max_extent)
2881 max_extent = entry->bytes;
2884 if (window_free < bytes || max_extent < cont1_bytes)
2887 cluster->window_start = first->offset;
2889 node = &first->offset_index;
2892 * now we've found our entries, pull them out of the free space
2893 * cache and put them into the cluster rbtree
2898 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2899 node = rb_next(&entry->offset_index);
2900 if (entry->bitmap || entry->bytes < min_bytes)
2903 rb_erase(&entry->offset_index, &ctl->free_space_offset);
2904 ret = tree_insert_offset(&cluster->root, entry->offset,
2905 &entry->offset_index, 0);
2906 total_size += entry->bytes;
2907 ASSERT(!ret); /* -EEXIST; Logic error */
2908 } while (node && entry != last);
2910 cluster->max_size = max_extent;
2911 trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
2916 * This specifically looks for bitmaps that may work in the cluster; we assume
2917 * that we have already failed to find extents that will work.
2920 setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2921 struct btrfs_free_cluster *cluster,
2922 struct list_head *bitmaps, u64 offset, u64 bytes,
2923 u64 cont1_bytes, u64 min_bytes)
2925 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2926 struct btrfs_free_space *entry;
2928 u64 bitmap_offset = offset_to_bitmap(ctl, offset);
2930 if (ctl->total_bitmaps == 0)
2934 * The bitmap that covers offset won't be in the list unless offset
2935 * is just its start offset.
2937 entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
2938 if (entry->offset != bitmap_offset) {
2939 entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
2940 if (entry && list_empty(&entry->list))
2941 list_add(&entry->list, bitmaps);
2944 list_for_each_entry(entry, bitmaps, list) {
2945 if (entry->bytes < bytes)
2947 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
2948 bytes, cont1_bytes, min_bytes);
2954 * The bitmaps list has all the bitmaps that record free space
2955 * starting after offset, so no more search is required.
2961 * here we try to find a cluster of blocks in a block group. The goal
2962 * is to find at least bytes+empty_size.
2963 * We might not find them all in one contiguous area.
2965 * returns zero and sets up cluster if things worked out, otherwise
2966 * it returns -ENOSPC
2968 int btrfs_find_space_cluster(struct btrfs_root *root,
2969 struct btrfs_block_group_cache *block_group,
2970 struct btrfs_free_cluster *cluster,
2971 u64 offset, u64 bytes, u64 empty_size)
2973 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2974 struct btrfs_free_space *entry, *tmp;
2981 * Choose the minimum extent size we'll require for this
2982 * cluster. For SSD_SPREAD, don't allow any fragmentation.
2983 * For metadata, allow allocations with smaller extents. For
2984 * data, keep it dense.
2986 if (btrfs_test_opt(root, SSD_SPREAD)) {
2987 cont1_bytes = min_bytes = bytes + empty_size;
2988 } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
2989 cont1_bytes = bytes;
2990 min_bytes = block_group->sectorsize;
2992 cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
2993 min_bytes = block_group->sectorsize;
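/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * Worked example of the data-allocation branch above: cont1_bytes is the
 * larger of the request and a quarter of request+empty_size, so a 1MiB
 * request with empty_size == 3MiB still only demands one 1MiB-contiguous
 * extent while asking for 4MiB of total window space.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void cluster_heuristic_example(void)
{
	uint64_t bytes = 1 << 20;		/* 1MiB request */
	uint64_t empty_size = 3 << 20;		/* 3MiB of desired slack */

	uint64_t cont1 = bytes > ((bytes + empty_size) >> 2) ?
			 bytes : ((bytes + empty_size) >> 2);

	assert(cont1 == 1 << 20);		/* max(1MiB, 4MiB / 4) */
}
#endif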
2996 spin_lock(&ctl->tree_lock);
2999 * If we know we don't have enough space to make a cluster, don't even
3000 * bother doing all the work to try and find one.
3002 if (ctl->free_space < bytes) {
3003 spin_unlock(&ctl->tree_lock);
3007 spin_lock(&cluster->lock);
3009 /* someone already found a cluster, hooray */
3010 if (cluster->block_group) {
3015 trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
3018 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
3020 cont1_bytes, min_bytes);
3022 ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
3023 offset, bytes + empty_size,
3024 cont1_bytes, min_bytes);
3026 /* Clear our temporary list */
3027 list_for_each_entry_safe(entry, tmp, &bitmaps, list)
3028 list_del_init(&entry->list);
3031 atomic_inc(&block_group->count);
3032 list_add_tail(&cluster->block_group_list,
3033 &block_group->cluster_list);
3034 cluster->block_group = block_group;
3036 trace_btrfs_failed_cluster_setup(block_group);
3039 spin_unlock(&cluster->lock);
3040 spin_unlock(&ctl->tree_lock);
3046 * simple code to zero out a cluster
3048 void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
3050 spin_lock_init(&cluster->lock);
3051 spin_lock_init(&cluster->refill_lock);
3052 cluster->root = RB_ROOT;
3053 cluster->max_size = 0;
3054 INIT_LIST_HEAD(&cluster->block_group_list);
3055 cluster->block_group = NULL;
3058 static int do_trimming(struct btrfs_block_group_cache *block_group,
3059 u64 *total_trimmed, u64 start, u64 bytes,
3060 u64 reserved_start, u64 reserved_bytes,
3061 struct btrfs_trim_range *trim_entry)
3063 struct btrfs_space_info *space_info = block_group->space_info;
3064 struct btrfs_fs_info *fs_info = block_group->fs_info;
3065 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3070 spin_lock(&space_info->lock);
3071 spin_lock(&block_group->lock);
3072 if (!block_group->ro) {
3073 block_group->reserved += reserved_bytes;
3074 space_info->bytes_reserved += reserved_bytes;
3077 spin_unlock(&block_group->lock);
3078 spin_unlock(&space_info->lock);
3080 ret = btrfs_discard_extent(fs_info->extent_root,
3081 start, bytes, &trimmed);
3083 *total_trimmed += trimmed;
3085 mutex_lock(&ctl->cache_writeout_mutex);
3086 btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
3087 list_del(&trim_entry->list);
3088 mutex_unlock(&ctl->cache_writeout_mutex);
3091 spin_lock(&space_info->lock);
3092 spin_lock(&block_group->lock);
3093 if (block_group->ro)
3094 space_info->bytes_readonly += reserved_bytes;
3095 block_group->reserved -= reserved_bytes;
3096 space_info->bytes_reserved -= reserved_bytes;
3097 spin_unlock(&space_info->lock);
3098 spin_unlock(&block_group->lock);
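/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * Callers of do_trimming() (see trim_no_bitmap()/trim_bitmaps() below)
 * publish the range on ctl->trimming_ranges while still holding
 * cache_writeout_mutex, drop the mutex for the potentially slow discard,
 * and rely on do_trimming() to re-take it when re-adding the space and
 * unlinking the entry. Roughly:
 */
#if 0
	struct btrfs_trim_range trim_entry;

	/* caller already holds ctl->cache_writeout_mutex here */
	trim_entry.start = extent_start;
	trim_entry.bytes = extent_bytes;
	list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
	mutex_unlock(&ctl->cache_writeout_mutex);

	ret = do_trimming(block_group, total_trimmed, start, bytes,
			  extent_start, extent_bytes, &trim_entry);
#endif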
3104 static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
3105 u64 *total_trimmed, u64 start, u64 end, u64 minlen)
3107 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3108 struct btrfs_free_space *entry;
3109 struct rb_node *node;
3115 while (start < end) {
3116 struct btrfs_trim_range trim_entry;
3118 mutex_lock(&ctl->cache_writeout_mutex);
3119 spin_lock(&ctl->tree_lock);
3121 if (ctl->free_space < minlen) {
3122 spin_unlock(&ctl->tree_lock);
3123 mutex_unlock(&ctl->cache_writeout_mutex);
3127 entry = tree_search_offset(ctl, start, 0, 1);
3129 spin_unlock(&ctl->tree_lock);
3130 mutex_unlock(&ctl->cache_writeout_mutex);
3135 while (entry->bitmap) {
3136 node = rb_next(&entry->offset_index);
3138 spin_unlock(&ctl->tree_lock);
3139 mutex_unlock(&ctl->cache_writeout_mutex);
3142 entry = rb_entry(node, struct btrfs_free_space,
3146 if (entry->offset >= end) {
3147 spin_unlock(&ctl->tree_lock);
3148 mutex_unlock(&ctl->cache_writeout_mutex);
3152 extent_start = entry->offset;
3153 extent_bytes = entry->bytes;
3154 start = max(start, extent_start);
3155 bytes = min(extent_start + extent_bytes, end) - start;
3156 if (bytes < minlen) {
3157 spin_unlock(&ctl->tree_lock);
3158 mutex_unlock(&ctl->cache_writeout_mutex);
3162 unlink_free_space(ctl, entry);
3163 kmem_cache_free(btrfs_free_space_cachep, entry);
3165 spin_unlock(&ctl->tree_lock);
3166 trim_entry.start = extent_start;
3167 trim_entry.bytes = extent_bytes;
3168 list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
3169 mutex_unlock(&ctl->cache_writeout_mutex);
3171 ret = do_trimming(block_group, total_trimmed, start, bytes,
3172 extent_start, extent_bytes, &trim_entry);
3178 if (fatal_signal_pending(current)) {
3189 static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
3190 u64 *total_trimmed, u64 start, u64 end, u64 minlen)
3192 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3193 struct btrfs_free_space *entry;
3197 u64 offset = offset_to_bitmap(ctl, start);
3199 while (offset < end) {
3200 bool next_bitmap = false;
3201 struct btrfs_trim_range trim_entry;
3203 mutex_lock(&ctl->cache_writeout_mutex);
3204 spin_lock(&ctl->tree_lock);
3206 if (ctl->free_space < minlen) {
3207 spin_unlock(&ctl->tree_lock);
3208 mutex_unlock(&ctl->cache_writeout_mutex);
3212 entry = tree_search_offset(ctl, offset, 1, 0);
3214 spin_unlock(&ctl->tree_lock);
3215 mutex_unlock(&ctl->cache_writeout_mutex);
3221 ret2 = search_bitmap(ctl, entry, &start, &bytes);
3222 if (ret2 || start >= end) {
3223 spin_unlock(&ctl->tree_lock);
3224 mutex_unlock(&ctl->cache_writeout_mutex);
3229 bytes = min(bytes, end - start);
3230 if (bytes < minlen) {
3231 spin_unlock(&ctl->tree_lock);
3232 mutex_unlock(&ctl->cache_writeout_mutex);
3236 bitmap_clear_bits(ctl, entry, start, bytes);
3237 if (entry->bytes == 0)
3238 free_bitmap(ctl, entry);
3240 spin_unlock(&ctl->tree_lock);
3241 trim_entry.start = start;
3242 trim_entry.bytes = bytes;
3243 list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
3244 mutex_unlock(&ctl->cache_writeout_mutex);
3246 ret = do_trimming(block_group, total_trimmed, start, bytes,
3247 start, bytes, &trim_entry);
3252 offset += BITS_PER_BITMAP * ctl->unit;
3255 if (start >= offset + BITS_PER_BITMAP * ctl->unit)
3256 offset += BITS_PER_BITMAP * ctl->unit;
3259 if (fatal_signal_pending(current)) {
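/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * The stride above, BITS_PER_BITMAP * ctl->unit, is the number of bytes one
 * bitmap entry covers. Worked example assuming 4K pages and a 4K unit:
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void bitmap_coverage_example(void)
{
	uint64_t page_size = 4096;		/* assumed PAGE_CACHE_SIZE */
	uint64_t unit = 4096;			/* assumed sectorsize */
	uint64_t bits_per_bitmap = page_size * 8;	/* 32768 bits */

	/* one bitmap covers 32768 * 4K = 128MiB of block group space */
	assert(bits_per_bitmap * unit == 128ULL << 20);
}
#endif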
3270 int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
3271 u64 *trimmed, u64 start, u64 end, u64 minlen)
3277 spin_lock(&block_group->lock);
3278 if (block_group->removed) {
3279 spin_unlock(&block_group->lock);
3282 atomic_inc(&block_group->trimming);
3283 spin_unlock(&block_group->lock);
3285 ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
3289 ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
3291 spin_lock(&block_group->lock);
3292 if (atomic_dec_and_test(&block_group->trimming) &&
3293 block_group->removed) {
3294 struct extent_map_tree *em_tree;
3295 struct extent_map *em;
3297 spin_unlock(&block_group->lock);
3299 lock_chunks(block_group->fs_info->chunk_root);
3300 em_tree = &block_group->fs_info->mapping_tree.map_tree;
3301 write_lock(&em_tree->lock);
3302 em = lookup_extent_mapping(em_tree, block_group->key.objectid,
3304 BUG_ON(!em); /* logic error, can't happen */
3306 * remove_extent_mapping() will delete us from the pinned_chunks
3307 * list, which is protected by the chunk mutex.
3309 remove_extent_mapping(em_tree, em);
3310 write_unlock(&em_tree->lock);
3311 unlock_chunks(block_group->fs_info->chunk_root);
3313 /* once for us and once for the tree */
3314 free_extent_map(em);
3315 free_extent_map(em);
3318 * We've left one free space entry and other tasks trimming
3319 * this block group have each left one entry. Free them.
3321 __btrfs_remove_free_space_cache(block_group->free_space_ctl);
3323 spin_unlock(&block_group->lock);
3330 * Find the left-most item in the cache tree, and then return the
3331 * smallest inode number in the item.
3333 * Note: the returned inode number may not be the smallest one in
3334 * the tree, if the left-most item is a bitmap.
3336 u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
3338 struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
3339 struct btrfs_free_space *entry = NULL;
3342 spin_lock(&ctl->tree_lock);
3344 if (RB_EMPTY_ROOT(&ctl->free_space_offset))
3347 entry = rb_entry(rb_first(&ctl->free_space_offset),
3348 struct btrfs_free_space, offset_index);
3350 if (!entry->bitmap) {
3351 ino = entry->offset;
3353 unlink_free_space(ctl, entry);
3357 kmem_cache_free(btrfs_free_space_cachep, entry);
3359 link_free_space(ctl, entry);
3365 ret = search_bitmap(ctl, entry, &offset, &count);
3366 /* Logic error; should be empty if it can't find anything */
3370 bitmap_clear_bits(ctl, entry, offset, 1);
3371 if (entry->bytes == 0)
3372 free_bitmap(ctl, entry);
3375 spin_unlock(&ctl->tree_lock);
3380 struct inode *lookup_free_ino_inode(struct btrfs_root *root,
3381 struct btrfs_path *path)
3383 struct inode *inode = NULL;
3385 spin_lock(&root->ino_cache_lock);
3386 if (root->ino_cache_inode)
3387 inode = igrab(root->ino_cache_inode);
3388 spin_unlock(&root->ino_cache_lock);
3392 inode = __lookup_free_space_inode(root, path, 0);
3396 spin_lock(&root->ino_cache_lock);
3397 if (!btrfs_fs_closing(root->fs_info))
3398 root->ino_cache_inode = igrab(inode);
3399 spin_unlock(&root->ino_cache_lock);
3404 int create_free_ino_inode(struct btrfs_root *root,
3405 struct btrfs_trans_handle *trans,
3406 struct btrfs_path *path)
3408 return __create_free_space_inode(root, trans, path,
3409 BTRFS_FREE_INO_OBJECTID, 0);
3412 int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
3414 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
3415 struct btrfs_path *path;
3416 struct inode *inode;
3418 u64 root_gen = btrfs_root_generation(&root->root_item);
3420 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
3424 * If we're unmounting then just return, since this does a search on the
3425 * normal root and not the commit root and we could deadlock.
3427 if (btrfs_fs_closing(fs_info))
3430 path = btrfs_alloc_path();
3434 inode = lookup_free_ino_inode(root, path);
3438 if (root_gen != BTRFS_I(inode)->generation)
3441 ret = __load_free_space_cache(root, inode, ctl, path, 0);
3445 "failed to load free ino cache for root %llu",
3446 root->root_key.objectid);
3450 btrfs_free_path(path);
3454 int btrfs_write_out_ino_cache(struct btrfs_root *root,
3455 struct btrfs_trans_handle *trans,
3456 struct btrfs_path *path,
3457 struct inode *inode)
3459 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
3461 struct btrfs_io_ctl io_ctl;
3463 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
3466 ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl,
3468 btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0);
3470 btrfs_delalloc_release_metadata(inode, inode->i_size);
3472 btrfs_err(root->fs_info,
3473 "failed to write free ino cache for root %llu",
3474 root->root_key.objectid);
3481 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3483 * Use this if you need to make a bitmap or extent entry specifically; it
3484 * doesn't do any of the merging that add_free_space does. This acts a lot like
3485 * how the free space cache loading stuff works, so you can get really weird
3486 * things.
3488 int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
3489 u64 offset, u64 bytes, bool bitmap)
3491 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
3492 struct btrfs_free_space *info = NULL, *bitmap_info;
3499 info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
3505 spin_lock(&ctl->tree_lock);
3506 info->offset = offset;
3507 info->bytes = bytes;
3508 ret = link_free_space(ctl, info);
3509 spin_unlock(&ctl->tree_lock);
3511 kmem_cache_free(btrfs_free_space_cachep, info);
3516 map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
3518 kmem_cache_free(btrfs_free_space_cachep, info);
3523 spin_lock(&ctl->tree_lock);
3524 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
3529 add_new_bitmap(ctl, info, offset);
3534 bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
3535 bytes -= bytes_added;
3536 offset += bytes_added;
3537 spin_unlock(&ctl->tree_lock);
3543 kmem_cache_free(btrfs_free_space_cachep, info);
3550 * Checks to see if the given range is in the free space cache. This is really
3551 * just used to check the absence of space, so if there is free space in the
3552 * range at all we will return 1.
3554 int test_check_exists(struct btrfs_block_group_cache *cache,
3555 u64 offset, u64 bytes)
3557 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
3558 struct btrfs_free_space *info;
3561 spin_lock(&ctl->tree_lock);
3562 info = tree_search_offset(ctl, offset, 0, 0);
3564 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
3572 u64 bit_off, bit_bytes;
3574 struct btrfs_free_space *tmp;
3577 bit_bytes = ctl->unit;
3578 ret = search_bitmap(ctl, info, &bit_off, &bit_bytes);
3580 if (bit_off == offset) {
3583 } else if (bit_off > offset &&
3584 offset + bytes > bit_off) {
3590 n = rb_prev(&info->offset_index);
3592 tmp = rb_entry(n, struct btrfs_free_space,
3594 if (tmp->offset + tmp->bytes < offset)
3596 if (offset + bytes < tmp->offset) {
3597 n = rb_prev(&tmp->offset_index);
3604 n = rb_next(&info->offset_index);
3606 tmp = rb_entry(n, struct btrfs_free_space,
3608 if (offset + bytes < tmp->offset)
3610 if (tmp->offset + tmp->bytes < offset) {
3611 n = rb_next(&tmp->offset_index);
3622 if (info->offset == offset) {
3627 if (offset > info->offset && offset < info->offset + info->bytes)
3630 spin_unlock(&ctl->tree_lock);
3633 #endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */