Btrfs: allow partial ordered extent completion
deliverable/linux.git: fs/btrfs/ordered-data.c
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}
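
The check above makes entry_end() saturate at the largest u64 instead of
wrapping, so a corrupt or huge length can never make an extent appear to
end before it starts. A minimal userspace sketch of the same idea (a
model for illustration, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* model of entry_end(): saturate instead of wrapping on overflow */
	static uint64_t entry_end_model(uint64_t file_offset, uint64_t len)
	{
		if (file_offset + len < file_offset)	/* unsigned wraparound */
			return UINT64_MAX;		/* (u64)-1 in the kernel */
		return file_offset + len;
	}

	int main(void)
	{
		/* normal case: 4096 + 8192 = 12288 */
		printf("%llu\n", (unsigned long long)entry_end_model(4096, 8192));
		/* overflow case: clamps to UINT64_MAX instead of wrapping to 89 */
		printf("%llu\n",
		       (unsigned long long)entry_end_model(UINT64_MAX - 10, 100));
		return 0;
	}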

/* returns NULL if the insertion worked, or the existing node it
 * collided with in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu\n", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
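
Both predicates treat an extent as the half-open byte range
[file_offset, file_offset + len): the first byte is inside, the end
offset is not, and two ranges that merely touch do not overlap. A small
userspace sketch with hypothetical offsets:

	#include <assert.h>
	#include <stdint.h>

	/* models of offset_in_entry() and range_overlaps() */
	static int offset_in_entry_model(uint64_t entry_off, uint64_t entry_len,
					 uint64_t off)
	{
		return off >= entry_off && off < entry_off + entry_len;
	}

	static int range_overlaps_model(uint64_t entry_off, uint64_t entry_len,
					uint64_t off, uint64_t len)
	{
		return off + len > entry_off && entry_off + entry_len > off;
	}

	int main(void)
	{
		assert(offset_in_entry_model(4096, 4096, 4096));  /* first byte */
		assert(!offset_in_entry_model(4096, 4096, 8192)); /* end excluded */
		assert(range_overlaps_model(4096, 4096, 8191, 2)); /* last byte */
		assert(!range_overlaps_model(4096, 4096, 8192, 4096)); /* touching */
		return 0;
	}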

/*
 * look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
	    !(type == BTRFS_ORDERED_NOCOW))
		entry->csum_bytes_left = disk_len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root,
			      &root->fs_info->ordered_roots);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	return 0;
}

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished. If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	WARN_ON(entry->csum_bytes_left < sum->len);
	entry->csum_bytes_left -= sum->len;
	if (entry->csum_bytes_left == 0)
		wake_up(&entry->wait);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file. The IO may span ordered extents. If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete. This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
		       dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}
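
The *file_offset update is what lets one completed IO be accounted
across several ordered extents: each call clamps the IO range to the
extent it found, subtracts the overlap from bytes_left, and leaves
*file_offset at the first unaccounted byte so the caller can loop. A
userspace model of that accounting (hypothetical sizes, no locking,
flags or error paths):

	#include <stdint.h>
	#include <stdio.h>

	struct extent_model {
		uint64_t file_offset;
		uint64_t len;
		uint64_t bytes_left;
	};

	/* model of the clamp-and-decrement step above */
	static void dec_first_model(struct extent_model *e,
				    uint64_t *file_offset, uint64_t io_size)
	{
		uint64_t entry_end = e->file_offset + e->len;
		uint64_t dec_start = *file_offset > e->file_offset ?
				     *file_offset : e->file_offset;
		uint64_t dec_end = *file_offset + io_size < entry_end ?
				   *file_offset + io_size : entry_end;

		e->bytes_left -= dec_end - dec_start;
		*file_offset = dec_end;	/* next call resumes here */
	}

	int main(void)
	{
		/* one 8K IO completing across two 4K ordered extents */
		struct extent_model exts[2] = {
			{ 0, 4096, 4096 },
			{ 4096, 4096, 4096 },
		};
		uint64_t off = 0, end = 8192;
		int i;

		for (i = 0; off < end; i++) {
			dec_first_model(&exts[i], &off, end - off);
			printf("extent %d: bytes_left %llu, next offset %llu\n",
			       i, (unsigned long long)exts[i].bytes_left,
			       (unsigned long long)off);
		}
		return 0;
	}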

/*
 * this is used to account for finished IO across a given range
 * of the file. The IO should not span ordered extents. If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;
	int index = log->log_transid % 2;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		spin_lock(&log->log_extents_lock[index]);
		if (list_empty(&ordered->log_list)) {
			list_add_tail(&ordered->log_list, &log->logged_list[index]);
			atomic_inc(&ordered->refs);
		}
		spin_unlock(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&tree->lock);
}

void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent. This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}
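
An ordered extent starts life with a single reference owned by the
tree, and every lookup that hands it out takes another; only the final
put frees it. The same drop-last-reference pattern in a userspace
sketch, with C11 atomics standing in for the kernel's atomic_t:

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct obj {
		atomic_int refs;
	};

	/* model of btrfs_put_ordered_extent(): free only on the last put */
	static void put_model(struct obj *o)
	{
		/* fetch_sub returns the old value, so 1 means this call
		 * dropped the final reference */
		if (atomic_fetch_sub(&o->refs, 1) == 1) {
			printf("last reference dropped, freeing\n");
			free(o);
		}
	}

	int main(void)
	{
		struct obj *o = malloc(sizeof(*o));

		if (!o)
			return 1;
		atomic_init(&o->refs, 2); /* one for the tree, one for a waiter */
		put_model(o);	/* still one reference left */
		put_model(o);	/* freed here */
		return 0;
	}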

/*
 * remove an ordered extent from the tree. No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages. We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		list_del_init(&BTRFS_I(inode)->ordered_operations);
	}

	if (!root->nr_ordered_extents) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root. This is done when balancing
 * space between drives.
 */
void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
{
	struct list_head splice, works;
	struct btrfs_ordered_extent *ordered, *next;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);
		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(ordered->inode);
		if (!inode) {
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		atomic_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		ordered->flush_work.func = btrfs_run_ordered_extent_work;
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);

		inode = ordered->inode;
		btrfs_put_ordered_extent(ordered);
		if (delay_iput)
			btrfs_add_delayed_iput(inode);
		else
			iput(inode);

		cond_resched();
	}
	mutex_unlock(&root->fs_info->ordered_operations_mutex);
}

void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info,
				    int delay_iput)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		btrfs_wait_ordered_extents(root, delay_iput);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&fs_info->ordered_root_lock);
}

/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list. These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io. When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct list_head splice;
	struct list_head works;
	struct btrfs_delalloc_work *work, *next;
	int ret = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_extent_flush_mutex);
	spin_lock(&root->fs_info->ordered_root_lock);
	list_splice_init(&cur_trans->ordered_operations, &splice);
	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);
		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);
		if (!inode)
			continue;

		if (!wait)
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &cur_trans->ordered_operations);
		spin_unlock(&root->fs_info->ordered_root_lock);

		work = btrfs_alloc_delalloc_work(inode, wait, 1);
		if (!work) {
			spin_lock(&root->fs_info->ordered_root_lock);
			if (list_empty(&BTRFS_I(inode)->ordered_operations))
				list_add_tail(&btrfs_inode->ordered_operations,
					      &splice);
			list_splice_tail(&splice,
					 &cur_trans->ordered_operations);
			spin_unlock(&root->fs_info->ordered_root_lock);
			ret = -ENOMEM;
			goto out;
		}
		list_add_tail(&work->list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &work->work);

		cond_resched();
		spin_lock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		btrfs_wait_and_free_delalloc_work(work);
	}
	mutex_unlock(&root->fs_info->ordered_extent_flush_mutex);
	return ret;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback. We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, setup an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback. So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work. So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there. We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness. Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags))
		filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
}
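
The final loop walks backward through the range: look up the last
ordered extent starting at or before end, wait for it, then pull end
back to just before that extent's start so the next lookup lands on its
predecessor. A simplified userspace model of the walk (two hypothetical
extents, an array standing in for the rbtree lookup):

	#include <stdint.h>
	#include <stdio.h>

	struct ext {
		uint64_t off;
		uint64_t len;
	};

	/* crude stand-in for btrfs_lookup_first_ordered_extent(): the
	 * last extent whose start is at or before `end` */
	static struct ext *lookup_model(struct ext *e, int n, uint64_t end)
	{
		struct ext *found = NULL;
		int i;

		for (i = 0; i < n; i++)
			if (e[i].off <= end)
				found = &e[i];
		return found;
	}

	int main(void)
	{
		struct ext exts[] = { { 0, 8192 }, { 8192, 8192 } };
		uint64_t start = 0, end = 16383;

		while (1) {
			struct ext *o = lookup_model(exts, 2, end);

			if (!o || o->off + o->len < start)
				break;
			printf("wait on extent at %llu\n",
			       (unsigned long long)o->off);
			end = o->off;
			if (end == 0 || end == start)
				break;
			end--;
		}
		return 0;
	}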

/*
 * find an ordered extent corresponding to file_offset. return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'. NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size. i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (entry_end(test) > disk_i_size) {
			/*
			 * we don't update disk_i_size now, so record this
			 * pending i_size update. Otherwise we will not know
			 * the real i_size.
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
					ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}
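
The final computation caps the new on-disk size at the in-memory
i_size, then lets a larger outstanding_isize, recorded by later extents
that already finished, pull it further forward. A userspace sketch of
just that arithmetic, with hypothetical values:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t min_u64(uint64_t a, uint64_t b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		uint64_t i_size = 100 * 4096;	/* in-memory file size */
		uint64_t offset = 10 * 4096;	/* end of the extent that finished */
		uint64_t outstanding_isize = 20 * 4096;	/* left by earlier completions */
		uint64_t new_i_size = min_u64(offset, i_size);

		if (outstanding_isize > new_i_size)
			new_i_size = min_u64(outstanding_isize, i_size);
		printf("disk_i_size advances to %llu\n",
		       (unsigned long long)new_i_size);
		return 0;
	}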

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum. This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors);

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}
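
The index arithmetic maps a disk byte number into the flat per-sector
checksum array: the sector index within one btrfs_ordered_sum is
(disk_bytenr - bytenr) divided by the sector size. A userspace sketch
with hypothetical geometry:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* hypothetical btrfs_ordered_sum covering 16 sectors */
		uint64_t sum_bytenr = 1 << 20;	/* disk start of the csum run */
		uint64_t sum_len = 16 * 4096;
		uint32_t sectorsize = 4096;
		uint64_t disk_bytenr = sum_bytenr + 5 * 4096; /* wanted sector */

		if (disk_bytenr >= sum_bytenr &&
		    disk_bytenr < sum_bytenr + sum_len) {
			unsigned long i = (disk_bytenr - sum_bytenr) / sectorsize;
			unsigned long num_sectors = sum_len / sectorsize;

			printf("checksum index %lu of %lu\n", i, num_sectors);
		}
		return 0;
	}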


/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod < root->fs_info->last_trans_committed)
		return;

	spin_lock(&root->fs_info->ordered_root_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &cur_trans->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
}

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void ordered_data_exit(void)
{
	if (btrfs_ordered_extent_cache)
		kmem_cache_destroy(btrfs_ordered_extent_cache);
}