/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"

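/*
 * Ordered extents record file ranges whose data has been handed to the
 * disk but whose metadata has not yet been committed.  Each inode keeps
 * them in an rb-tree (struct btrfs_ordered_inode_tree) indexed by file
 * offset, and every entry is reference counted.
 */

/*
 * entry_end returns the first byte past this ordered extent in the file,
 * clamped to the largest possible u64 if file_offset + len overflows
 */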
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

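/*
 * insert 'node' into the tree of ordered extents, keyed by file offset.
 * Returns NULL on success, or the existing node whose range already
 * covers file_offset.
 */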
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

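/*
 * search for a node that covers file_offset.  If no exact match is found,
 * NULL is returned and *prev_ret is set to the nearest node so the caller
 * has a starting point for further searching.
 */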
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

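/*
 * helper to check if a given offset is inside a given entry
 */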
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

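/*
 * find an ordered extent covering file_offset, or the closest node if
 * there is no exact match.  The result is cached in tree->last to speed
 * up repeated lookups in the same region.
 */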
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * This also sets the EXTENT_ORDERED bit on the range in the inode.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kzalloc(sizeof(*entry), GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	mutex_lock(&tree->mutex);
	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);

	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		atomic_inc(&entry->refs);
	}
	set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset,
			   entry_end(entry) - 1, GFP_NOFS);

	mutex_unlock(&tree->mutex);
	BUG_ON(node);
	return 0;
}

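/*
 * The EXTENT_ORDERED bits set on the io_tree above are cleared again by
 * btrfs_dec_test_ordered_pending() as IO completes; once no ordered bits
 * remain inside an ordered extent, that function reports it as done.
 */
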
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
int btrfs_add_ordered_sum(struct inode *inode,
			  struct btrfs_ordered_extent *entry,
			  struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	list_add_tail(&sum->list, &entry->list);
	mutex_unlock(&tree->mutex);
	return 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	clear_extent_ordered(io_tree, file_offset, file_offset + io_size - 1,
			     GFP_NOFS);
	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	ret = test_range_bit(io_tree, entry->file_offset,
			     entry->file_offset + entry->len - 1,
			     EXTENT_ORDERED, 0);
	if (ret == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
out:
	mutex_unlock(&tree->mutex);
	return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	if (atomic_dec_and_test(&entry->refs)) {
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kfree(entry);
	}
	return 0;
}

/*
 * remove an ordered extent from the tree.  No references are dropped,
 * but anyone waiting on this extent is woken up.
 */
int btrfs_remove_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	mutex_unlock(&tree->mutex);
	wake_up(&entry->wait);
	return 0;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for pdflush to find them
	 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
	do_sync_file_range(file, start, end, SYNC_FILE_RANGE_WRITE);
#else
	do_sync_mapping_range(inode->i_mapping, start, end,
			      SYNC_FILE_RANGE_WRITE);
#endif
	if (wait)
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	u64 wait_end;
	struct btrfs_ordered_extent *ordered;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;

	if (start + len < start) {
		wait_end = (inode->i_size + mask) & ~mask;
		orig_end = (u64)-1;
	} else {
		orig_end = start + len - 1;
		wait_end = orig_end;
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
	do_sync_file_range(file, start, wait_end, SYNC_FILE_RANGE_WRITE);
#else
	do_sync_mapping_range(inode->i_mapping, start, wait_end,
			      SYNC_FILE_RANGE_WRITE);
#endif
	wait_on_extent_writeback(&BTRFS_I(inode)->io_tree, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
			   EXTENT_ORDERED | EXTENT_DELALLOC, 0)) {
		printk("inode %lu still ordered or delalloc after wait "
		       "%llu %llu\n", inode->i_ino,
		       (unsigned long long)start,
		       (unsigned long long)orig_end);
	}
}

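/*
 * Callers that want to wait on everything outstanding can pass len ==
 * (u64)-1; the start + len overflow check at the top of the function
 * rounds such a request up to a sector aligned i_size.
 */
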
/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	mutex_unlock(&tree->mutex);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	mutex_unlock(&tree->mutex);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size_test;
	struct rb_node *node;
	struct btrfs_ordered_extent *test;

	mutex_lock(&tree->mutex);
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size >= inode->i_size ||
	    ordered->file_offset + ordered->len <= disk_i_size) {
		goto out;
	}

	/*
	 * we can't update the disk_isize if there are delalloc bytes
	 * between disk_i_size and this ordered extent
	 */
	if (test_range_bit(io_tree, disk_i_size,
			   ordered->file_offset + ordered->len - 1,
			   EXTENT_DELALLOC, 0)) {
		goto out;
	}

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	node = &ordered->rb_node;
	while (1) {
		node = rb_prev(node);
		if (!node)
			break;
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= inode->i_size)
			break;
		if (test->file_offset >= disk_i_size)
			goto out;
	}
	new_i_size = min_t(u64, entry_end(ordered), i_size_read(inode));

	/*
	 * at this point, we know we can safely update i_size to at least
	 * the offset from this ordered extent.  But, we need to
	 * walk forward and see if ios from higher up in the file have
	 * finished.
	 */
	node = rb_next(&ordered->rb_node);
	if (node) {
		/*
		 * do we have an area where IO might have finished
		 * between our ordered extent and the next one.
		 */
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset > entry_end(ordered)) {
			i_size_test = test->file_offset - 1;
		} else {
			i_size_test = 0;
		}
	} else {
		i_size_test = i_size_read(inode);
	}

	/*
	 * i_size_test is the end of a region after this ordered
	 * extent where there are no ordered extents.  As long as there
	 * are no delalloc bytes in this area, it is safe to update
	 * disk_i_size to the end of the region.
	 */
	if (i_size_test > entry_end(ordered) &&
	    !test_range_bit(io_tree, entry_end(ordered), i_size_test,
			    EXTENT_DELALLOC, 0)) {
		new_i_size = min_t(u64, i_size_test, i_size_read(inode));
	}
	BTRFS_I(inode)->disk_i_size = new_i_size;
out:
	mutex_unlock(&tree->mutex);
	return 0;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_sector_sum *sector_sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct list_head *cur;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int ret = 1;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 1;

	mutex_lock(&tree->mutex);
	list_for_each_prev(cur, &ordered->list) {
		ordered_sum = list_entry(cur, struct btrfs_ordered_sum, list);
		if (offset >= ordered_sum->file_offset) {
			num_sectors = ordered_sum->len / sectorsize;
			sector_sums = &ordered_sum->sums;
			for (i = 0; i < num_sectors; i++) {
				if (sector_sums[i].offset == offset) {
					*sum = sector_sums[i].sum;
					ret = 0;
					goto out;
				}
			}
		}
	}
out:
	mutex_unlock(&tree->mutex);
	btrfs_put_ordered_extent(ordered);
	return ret;
}

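/*
 * Illustrative use (a sketch; names other than btrfs_find_ordered_sum are
 * hypothetical):
 *
 *	u32 csum;
 *
 *	if (btrfs_find_ordered_sum(inode, sector_offset, &csum) == 0)
 *		verify_page_against(csum);
 *
 * A return value of 1 means no pending checksum covers the offset, and the
 * caller must fall back to the csums stored in the btree.
 */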