/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
static struct kmem_cache *btrfs_ordered_extent_cache;
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}
/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu\n", offset);
}
/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}
/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
/*
 * look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}
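/*
 * Illustrative example (not part of the original file): with two ordered
 * extents covering [0, 4096) and [8192, 12288), tree_search() behaves
 * roughly like this:
 *
 *	tree_search(tree, 9000);	returns the [8192, 12288) node,
 *					since the offset falls inside it
 *	tree_search(tree, 5000);	no entry contains 5000, so the
 *					walk-back in __tree_search() lands
 *					on the [0, 4096) node, the closest
 *					entry starting below the offset
 *
 * Callers therefore re-check the result with offset_in_entry() before
 * treating it as a containing extent, as the lookup helpers below do.
 */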
/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
	    !(type == BTRFS_ORDERED_NOCOW))
		entry->csum_bytes_left = disk_len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root,
			      &root->fs_info->ordered_roots);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	return 0;
}
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}
int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}
int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}
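/*
 * Illustrative sketch of a caller (hypothetical, not part of this file): a
 * COW writeback path that has just reserved a disk extent would record it
 * roughly like this before submitting the bios:
 *
 *	ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
 *				       ram_size, cur_alloc_size, 0);
 *
 * where 'ins', 'ram_size' and 'cur_alloc_size' are assumed caller-side
 * names.  Passing 0 for 'type' sets no extra flag bits, which is how a
 * plain (uncompressed, non-prealloc, non-nocow) extent is described;
 * compressed and direct-IO writers use the _compress and _dio wrappers
 * above instead.
 */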
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	WARN_ON(entry->csum_bytes_left < sum->len);
	entry->csum_bytes_left -= sum->len;
	if (entry->csum_bytes_left == 0)
		wake_up(&entry->wait);
	spin_unlock_irq(&tree->lock);
}
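/*
 * Worked example (illustrative): __btrfs_add_ordered_extent() initializes
 * csum_bytes_left to disk_len, say 16384 bytes.  If checksumming completes
 * in two btrfs_ordered_sum batches of 8192 bytes each, the first call above
 * drops csum_bytes_left to 8192 and the second to 0, at which point anyone
 * sleeping on entry->wait for the checksums is woken.
 */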
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
		       dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}
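/*
 * Illustrative sketch (hypothetical caller, not part of this file): an
 * end_io handler whose write covered [logical_start, logical_start + bytes)
 * can walk forward through the ordered extents it spans, since *file_offset
 * advances past each accounted range.  This assumes the whole range is
 * covered by ordered extents, as it is for a submitted write:
 *
 *	u64 offset = logical_start;
 *	struct btrfs_ordered_extent *ordered;
 *
 *	while (offset < logical_start + bytes) {
 *		ordered = NULL;
 *		if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
 *				&offset, logical_start + bytes - offset,
 *				uptodate))
 *			finish_one(ordered);
 *	}
 *
 * Here finish_one() stands in for whatever completion work (and the final
 * btrfs_put_ordered_extent()) the caller performs on a finished extent.
 */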
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}
/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;
	int index = log->log_transid % 2;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		spin_lock(&log->log_extents_lock[index]);
		if (list_empty(&ordered->log_list)) {
			list_add_tail(&ordered->log_list, &log->logged_list[index]);
			atomic_inc(&ordered->refs);
		}
		spin_unlock(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&tree->lock);
}
void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}
void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}
/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}
/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		list_del_init(&BTRFS_I(inode)->ordered_operations);
	}

	if (!root->nr_ordered_extents) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}
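/*
 * Illustrative lifecycle sketch (hypothetical, not part of this file): a
 * completion path typically holds two references by the time it is done,
 * the tree's own reference and the one taken by the lookup that found the
 * entry, so finishing an ordered extent looks roughly like:
 *
 *	btrfs_remove_ordered_extent(inode, ordered);	unlink, wake waiters
 *	btrfs_put_ordered_extent(ordered);		drop the tree's ref
 *	btrfs_put_ordered_extent(ordered);		drop the lookup ref,
 *							which frees the entry
 */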
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}
/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
{
	struct list_head splice, works;
	struct btrfs_ordered_extent *ordered, *next;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);
		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(ordered->inode);
		if (!inode) {
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		atomic_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		ordered->flush_work.func = btrfs_run_ordered_extent_work;
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);

		inode = ordered->inode;
		btrfs_put_ordered_extent(ordered);
		if (delay_iput)
			btrfs_add_delayed_iput(inode);
		else
			iput(inode);

		cond_resched();
	}
	mutex_unlock(&root->fs_info->ordered_operations_mutex);
}
void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info,
				    int delay_iput)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		btrfs_wait_ordered_extents(root, delay_iput);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&fs_info->ordered_root_lock);
}
/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct list_head splice;
	struct list_head works;
	struct btrfs_delalloc_work *work, *next;
	int ret = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_extent_flush_mutex);
	spin_lock(&root->fs_info->ordered_root_lock);
	list_splice_init(&cur_trans->ordered_operations, &splice);
	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);
		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);
		if (!inode)
			continue;

		if (!wait)
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &cur_trans->ordered_operations);
		spin_unlock(&root->fs_info->ordered_root_lock);

		work = btrfs_alloc_delalloc_work(inode, wait, 1);
		if (!work) {
			spin_lock(&root->fs_info->ordered_root_lock);
			if (list_empty(&BTRFS_I(inode)->ordered_operations))
				list_add_tail(&btrfs_inode->ordered_operations,
					      &splice);
			list_splice_tail(&splice,
					 &cur_trans->ordered_operations);
			spin_unlock(&root->fs_info->ordered_root_lock);
			ret = -ENOMEM;
			goto out;
		}
		list_add_tail(&work->list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &work->work);

		cond_resched();
		spin_lock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		btrfs_wait_and_free_delalloc_work(work);
	}
	mutex_unlock(&root->fs_info->ordered_extent_flush_mutex);
	return ret;
}
/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}
/*
 * Used to wait on ordered extents across a large range of bytes.
 */
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, set up an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback.  So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work.  So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there.  We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness.  Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags))
		filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
}
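/*
 * Illustrative usage (hypothetical, not part of this file): fsync-style
 * callers flush and wait on everything from 'start' onward; passing
 * len = (u64)-1 makes orig_end clamp to INT_LIMIT(loff_t), i.e. "to the
 * end of the file":
 *
 *	btrfs_wait_ordered_range(inode, 0, (u64)-1);
 */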
/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}
/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}
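/*
 * Illustrative sketch (hypothetical, not part of this file): before a
 * direct-IO path locks a whole range, it can check for any overlapping
 * ordered extent and flush it out first:
 *
 *	ordered = btrfs_lookup_ordered_range(inode, lockstart,
 *					     lockend - lockstart + 1);
 *	if (ordered) {
 *		btrfs_start_ordered_extent(inode, ordered, 1);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 *
 * where 'lockstart' and 'lockend' are assumed caller-side names.
 */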
/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}
/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	if (ordered)
		offset = entry_end(ordered);
	else
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);

	spin_lock_irq(&tree->lock);
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (entry_end(test) > disk_i_size) {
			/*
			 * we don't update disk_i_size now, so record this
			 * undealt i_size. Otherwise we will not know the
			 * real i_size.
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
					ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the disk_i_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}
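/*
 * Worked example (illustrative): suppose i_size is 100k, disk_i_size is 0,
 * and two ordered extents A [0, 50k) and B [50k, 100k) are in flight.  If B
 * finishes first, the backward walk from B finds A still pending, so
 * disk_i_size is left alone and 100k is parked in A->outstanding_isize.
 * When A finishes, nothing before it is pending, and disk_i_size jumps
 * straight to min(A->outstanding_isize, i_size) = 100k.
 */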
/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors);

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}
/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod < root->fs_info->last_trans_committed)
		return;

	spin_lock(&root->fs_info->ordered_root_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &cur_trans->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
}
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}
void ordered_data_exit(void)
{
	if (btrfs_ordered_extent_cache)
		kmem_cache_destroy(btrfs_ordered_extent_cache);
}
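/*
 * Illustrative usage (hypothetical, not part of this file): module setup
 * code would pair these as constructor/destructor, roughly:
 *
 *	err = ordered_data_init();
 *	if (err)
 *		goto free_earlier_caches;	hypothetical error label
 *	...
 *	ordered_data_exit();
 */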