Btrfs: make sure the backref walker catches all refs to our extent
fs/btrfs/ordered-data.c
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"

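/* slab cache used for all struct btrfs_ordered_extent allocations */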
static struct kmem_cache *btrfs_ordered_extent_cache;

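/*
 * return the first byte past the end of an ordered extent, clamping to
 * (u64)-1 when file_offset + len would overflow
 */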
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or the existing node it
 * collided with in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

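/*
 * helper to report a fatal inconsistency in the ordered tree; called
 * when tree_insert finds an entry already covering the offset
 */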
static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu\n", (unsigned long long)offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

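/*
 * return 1 if the byte range [file_offset, file_offset + len) overlaps
 * this ordered extent at all, 0 otherwise
 */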
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * find the first ordered struct that covers this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
	    !(type == BTRFS_ORDERED_NOCOW))
		entry->csum_bytes_left = disk_len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root,
			      &root->fs_info->ordered_roots);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	return 0;
}

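/* wrapper for ordinary buffered writes: no direct IO, no compression */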
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

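/* wrapper for direct IO writes: sets BTRFS_ORDERED_DIRECT on the entry */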
int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

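/* wrapper for compressed writes: records the compression type used */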
int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished. If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	WARN_ON(entry->csum_bytes_left < sum->len);
	entry->csum_bytes_left -= sum->len;
	if (entry->csum_bytes_left == 0)
		wake_up(&entry->wait);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
		       (unsigned long long)dec_start,
		       (unsigned long long)dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;
	int index = log->log_transid % 2;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		spin_lock(&log->log_extents_lock[index]);
		if (list_empty(&ordered->log_list)) {
			list_add_tail(&ordered->log_list, &log->logged_list[index]);
			atomic_inc(&ordered->refs);
		}
		spin_unlock(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&tree->lock);
}

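/*
 * wait for every extent captured by btrfs_get_logged_extents() for this
 * log transid to finish its IO, dropping our reference as we go
 */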
void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

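/*
 * like btrfs_wait_logged_extents(), but drops the logged-extent
 * references without waiting for the IO to finish
 */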
void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		list_del_init(&BTRFS_I(inode)->ordered_operations);
	}

	if (!root->nr_ordered_extents) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

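/*
 * worker callback queued by btrfs_wait_ordered_extents(): start and wait
 * for one ordered extent, then signal the waiter via ->completion
 */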
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
{
	struct list_head splice, works;
	struct btrfs_ordered_extent *ordered, *next;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);
		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(ordered->inode);
		if (!inode) {
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		atomic_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		ordered->flush_work.func = btrfs_run_ordered_extent_work;
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);

		inode = ordered->inode;
		btrfs_put_ordered_extent(ordered);
		if (delay_iput)
			btrfs_add_delayed_iput(inode);
		else
			iput(inode);

		cond_resched();
	}
	mutex_unlock(&root->fs_info->ordered_operations_mutex);
}

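/*
 * wait for the ordered extents of every root on the ordered_roots list,
 * grabbing a reference on each root so it can't go away under us
 */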
void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info,
				    int delay_iput)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		btrfs_wait_ordered_extents(root, delay_iput);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&fs_info->ordered_root_lock);
}

/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct list_head splice;
	struct list_head works;
	struct btrfs_delalloc_work *work, *next;
	int ret = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_root_lock);
	list_splice_init(&cur_trans->ordered_operations, &splice);
	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);
		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);
		if (!inode)
			continue;

		if (!wait)
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &cur_trans->ordered_operations);
		spin_unlock(&root->fs_info->ordered_root_lock);

		work = btrfs_alloc_delalloc_work(inode, wait, 1);
		if (!work) {
			spin_lock(&root->fs_info->ordered_root_lock);
			if (list_empty(&BTRFS_I(inode)->ordered_operations))
				list_add_tail(&btrfs_inode->ordered_operations,
					      &splice);
			list_splice_tail(&splice,
					 &cur_trans->ordered_operations);
			spin_unlock(&root->fs_info->ordered_root_lock);
			ret = -ENOMEM;
			goto out;
		}
		list_add_tail(&work->list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &work->work);

		cond_resched();
		spin_lock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		btrfs_wait_and_free_delalloc_work(work);
	}
	mutex_unlock(&root->fs_info->ordered_operations_mutex);
	return ret;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, set up an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback.  So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work.  So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there.  We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness.  Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags))
		filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * look up and return the ordered extent covering 'file_offset', or the
 * closest one if there is no exact match.  NULL is returned if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	if (ordered)
		offset = entry_end(ordered);
	else
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);

	spin_lock_irq(&tree->lock);
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (entry_end(test) > disk_i_size) {
			/*
			 * we don't update disk_i_size now, so record this
			 * pending i_size, or we will not know the real
			 * i_size
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
					ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors);

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod < root->fs_info->last_trans_committed)
		return;

	spin_lock(&root->fs_info->ordered_root_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &cur_trans->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
}

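/* set up and tear down the ordered extent slab cache at module init/exit */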
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void ordered_data_exit(void)
{
	if (btrfs_ordered_extent_cache)
		kmem_cache_destroy(btrfs_ordered_extent_cache);
}