fs/btrfs/ordered-data.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

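/*
 * Worked example (editor's illustration, not from the original source):
 *
 *	entry->file_offset = (u64)-4;	hypothetical value near the top
 *	entry->len         = 8;		offset + len wraps past zero
 *	entry_end(entry);		returns (u64)-1 instead of wrapping to 4
 *
 * The clamp keeps entry_end() monotonic, so the rbtree comparisons in
 * tree_insert() and __tree_search() below stay consistent even for bogus
 * or extreme lengths.
 */
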
/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu\n", (unsigned long long)offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

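/*
 * Worked example (editor's illustration, hypothetical values): for an entry
 * with file_offset = 4096 and len = 4096, i.e. bytes 4096..8191:
 *
 *	range_overlaps(entry, 0,    4096);	0: range ends at byte 4095
 *	range_overlaps(entry, 8192, 4096);	0: range starts past the entry
 *	range_overlaps(entry, 8191, 1);		1: last byte of the entry
 *
 * Both boundaries use "<=", so ranges that merely touch end-to-start do
 * not count as overlapping.
 */
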
/*
 * find the first ordered struct that covers this offset, otherwise
 * the first one less than the offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

	return 0;
}

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

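/*
 * Usage sketch (editor's illustration, not from the original file): a
 * hypothetical write path that has just reserved a disk extent would pin
 * it with an ordered extent before submitting the bio:
 *
 *	ret = btrfs_add_ordered_extent(inode, file_offset, disk_start,
 *				       num_bytes, disk_num_bytes, type);
 *	if (ret)
 *		return ret;
 *
 * Here disk_start and disk_num_bytes stand in for values from the extent
 * allocator, and -ENOMEM is the only failure.  "type" is one of the
 * BTRFS_ORDERED_* flag bits; __btrfs_add_ordered_extent() records it
 * unless it is IO_DONE or COMPLETE.  The tree holds the single reference
 * taken at creation.
 */
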
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across them.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
		       (unsigned long long)dec_start,
		       (unsigned long long)dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

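/*
 * Usage sketch (editor's illustration, not from the original file): an
 * endio handler whose completed bytes may cross several ordered extents
 * could walk them like this, with "start" and "end" bounding the range:
 *
 *	u64 offset = start;
 *	struct btrfs_ordered_extent *ordered = NULL;
 *
 *	while (offset < end) {
 *		u64 prev = offset;
 *
 *		if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
 *							 &offset,
 *							 end - offset, 1)) {
 *			finish_one(ordered);	hypothetical helper
 *			btrfs_put_ordered_extent(ordered);
 *			ordered = NULL;
 *		} else if (offset == prev) {
 *			break;	no ordered extent covers this offset
 *		}
 *	}
 *
 * Each time the function returns 1, "ordered" carries an extra reference
 * that the caller must drop with btrfs_put_ordered_extent().
 */
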
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

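/*
 * Reference-counting sketch (editor's illustration): every lookup helper
 * in this file that hands back an entry bumps entry->refs, so the pattern
 * for a hypothetical reader is always lookup, use, put:
 *
 *	struct btrfs_ordered_extent *ordered;
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
 *	if (ordered) {
 *		inspect(ordered);	hypothetical use of the entry
 *		btrfs_put_ordered_extent(ordered);
 *	}
 *
 * The final put frees the entry and its checksum list, and drops the
 * inode reference taken by igrab() at creation via a delayed iput.
 */
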
/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);

	trace_btrfs_ordered_extent_remove(inode, entry);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		list_del_init(&BTRFS_I(inode)->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
	wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
{
	struct list_head splice, works;
	struct list_head *cur;
	struct btrfs_ordered_extent *ordered, *next;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_splice_init(&root->fs_info->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		cur = splice.next;
		ordered = list_entry(cur, struct btrfs_ordered_extent,
				     root_extent_list);
		list_del_init(&ordered->root_extent_list);
		atomic_inc(&ordered->refs);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(ordered->inode);

		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			ordered->flush_work.func = btrfs_run_ordered_extent_work;
			list_add_tail(&ordered->work_list, &works);
			btrfs_queue_worker(&root->fs_info->flush_workers,
					   &ordered->flush_work);
		} else {
			btrfs_put_ordered_extent(ordered);
		}

		cond_resched();
		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);

		inode = ordered->inode;
		btrfs_put_ordered_extent(ordered);
		if (delay_iput)
			btrfs_add_delayed_iput(inode);
		else
			iput(inode);

		cond_resched();
	}
}

/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct list_head splice;
	struct list_head works;
	struct btrfs_delalloc_work *work, *next;
	int ret = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_extent_lock);
again:
	list_splice_init(&root->fs_info->ordered_operations, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);

		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);

		if (!wait && inode) {
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &root->fs_info->ordered_operations);
		}

		if (!inode)
			continue;
		spin_unlock(&root->fs_info->ordered_extent_lock);

		work = btrfs_alloc_delalloc_work(inode, wait, 1);
		if (!work) {
			if (list_empty(&BTRFS_I(inode)->ordered_operations))
				list_add_tail(&btrfs_inode->ordered_operations,
					      &splice);
			spin_lock(&root->fs_info->ordered_extent_lock);
			list_splice_tail(&splice,
					 &root->fs_info->ordered_operations);
			spin_unlock(&root->fs_info->ordered_extent_lock);
			ret = -ENOMEM;
			goto out;
		}
		list_add_tail(&work->list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &work->work);

		cond_resched();
		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	if (wait && !list_empty(&root->fs_info->ordered_operations))
		goto again;

	spin_unlock(&root->fs_info->ordered_extent_lock);
out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		btrfs_wait_and_free_delalloc_work(work);
	}
	mutex_unlock(&root->fs_info->ordered_operations_mutex);
	return ret;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

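/*
 * Usage sketch (editor's illustration, not from the original file):
 * flushing one ordered extent synchronously is a lookup followed by a
 * blocking start:
 *
 *	struct btrfs_ordered_extent *ordered;
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
 *	if (ordered) {
 *		btrfs_start_ordered_extent(inode, ordered, 1);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 *
 * With wait == 1 the call only returns once BTRFS_ORDERED_COMPLETE is
 * set, i.e. after btrfs_remove_ordered_extent() has woken entry->wait.
 */
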
/*
 * Used to wait on ordered extents across a large range of bytes.
 */
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;
	int found;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, setup an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback.  So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work.  So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there.  We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness.  Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags))
		filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	found = 0;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		found++;
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
}

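/*
 * Usage sketch (editor's illustration): a caller that needs every byte of
 * a file durable before proceeding could simply do:
 *
 *	btrfs_wait_ordered_range(inode, 0, (u64)-1);
 *
 * The start + len overflow check above clamps the end to INT_LIMIT(loff_t),
 * so passing (u64)-1 as the length is a safe "whole file" idiom.
 */
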
/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	if (ordered)
		offset = entry_end(ordered);
	else
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);

	spin_lock_irq(&tree->lock);
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size || offset <= disk_i_size) {
		goto out;
	}

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (test->file_offset >= disk_i_size) {
			/*
			 * we don't update disk_i_size now, so record this
			 * not-yet-applied i_size, or we will not know the
			 * real i_size.
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
					ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one,
	 * and we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

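/*
 * Worked example (editor's illustration, hypothetical numbers): suppose
 * i_size is 100K, disk_i_size is 20K, and extents for 20K-40K and 40K-60K
 * are in flight.  If the 40K-60K extent finishes first, the backward walk
 * above finds the still-pending 20K-40K extent, stashes 60K in its
 * outstanding_isize, and bails out.  When 20K-40K later completes, no
 * pending extent blocks the walk, so disk_i_size jumps straight to the
 * recorded 60K rather than stopping at 40K.
 */
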
/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_sector_sum *sector_sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int ret = 1;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 1;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr) {
			num_sectors = ordered_sum->len / sectorsize;
			sector_sums = ordered_sum->sums;
			for (i = 0; i < num_sectors; i++) {
				if (sector_sums[i].bytenr == disk_bytenr) {
					*sum = sector_sums[i].sum;
					ret = 0;
					goto out;
				}
			}
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return ret;
}

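/*
 * Usage sketch (editor's illustration, not from the original file): a
 * hypothetical read-time verifier that raced with checksum insertion could
 * fall back to the in-flight list:
 *
 *	u32 csum;
 *
 *	if (btrfs_find_ordered_sum(inode, page_offset, disk_bytenr,
 *				   &csum) == 0)
 *		compare_with_computed(csum);	hypothetical check
 *
 * A return of 0 means the checksum was still queued on an ordered extent;
 * 1 means it has to be read from the csum tree instead.
 */
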
/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, struct inode *inode)
{
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod < root->fs_info->last_trans_committed)
		return;

	spin_lock(&root->fs_info->ordered_extent_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &root->fs_info->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
}

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void ordered_data_exit(void)
{
	if (btrfs_ordered_extent_cache)
		kmem_cache_destroy(btrfs_ordered_extent_cache);
}