Commit | Line | Data |
---|---|---|
dc17ff8f CM |
1 | /* |
2 | * Copyright (C) 2007 Oracle. All rights reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU General Public | |
6 | * License v2 as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, | |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
11 | * General Public License for more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public | |
14 | * License along with this program; if not, write to the | |
15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | |
16 | * Boston, MA 021110-1307, USA. | |
17 | */ | |
18 | ||
dc17ff8f | 19 | #include <linux/slab.h> |
d6bfde87 | 20 | #include <linux/blkdev.h> |
f421950f CM |
21 | #include <linux/writeback.h> |
22 | #include <linux/pagevec.h> | |
dc17ff8f CM |
23 | #include "ctree.h" |
24 | #include "transaction.h" | |
25 | #include "btrfs_inode.h" | |
e6dcd2dc | 26 | #include "extent_io.h" |
199c2a9c | 27 | #include "disk-io.h" |
ebb8765b | 28 | #include "compression.h" |
dc17ff8f | 29 | |
6352b91d MX |
30 | static struct kmem_cache *btrfs_ordered_extent_cache; |
31 | ||
e6dcd2dc | 32 | static u64 entry_end(struct btrfs_ordered_extent *entry) |
dc17ff8f | 33 | { |
e6dcd2dc CM |
34 | if (entry->file_offset + entry->len < entry->file_offset) |
35 | return (u64)-1; | |
36 | return entry->file_offset + entry->len; | |
dc17ff8f CM |
37 | } |
38 | ||
d352ac68 CM |
39 | /* returns NULL if the insertion worked, or the existing node that it
40 | * collided with in the tree
41 | */ | |
e6dcd2dc CM |
42 | static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset, |
43 | struct rb_node *node) | |
dc17ff8f | 44 | { |
d397712b CM |
45 | struct rb_node **p = &root->rb_node; |
46 | struct rb_node *parent = NULL; | |
e6dcd2dc | 47 | struct btrfs_ordered_extent *entry; |
dc17ff8f | 48 | |
d397712b | 49 | while (*p) { |
dc17ff8f | 50 | parent = *p; |
e6dcd2dc | 51 | entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node); |
dc17ff8f | 52 | |
e6dcd2dc | 53 | if (file_offset < entry->file_offset) |
dc17ff8f | 54 | p = &(*p)->rb_left; |
e6dcd2dc | 55 | else if (file_offset >= entry_end(entry)) |
dc17ff8f CM |
56 | p = &(*p)->rb_right; |
57 | else | |
58 | return parent; | |
59 | } | |
60 | ||
61 | rb_link_node(node, parent, p); | |
62 | rb_insert_color(node, root); | |
63 | return NULL; | |
64 | } | |
65 | ||
43c04fb1 JM |
66 | static void ordered_data_tree_panic(struct inode *inode, int errno, |
67 | u64 offset) | |
68 | { | |
69 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); | |
70 | btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset " | |
351fd353 | 71 | "%llu", offset); |
43c04fb1 JM |
72 | } |
73 | ||
d352ac68 CM |
74 | /* |
75 | * look for a given offset in the tree, and if it can't be found return the | |
76 | * first lesser offset | |
77 | */ | |
e6dcd2dc CM |
78 | static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset, |
79 | struct rb_node **prev_ret) | |
dc17ff8f | 80 | { |
d397712b | 81 | struct rb_node *n = root->rb_node; |
dc17ff8f | 82 | struct rb_node *prev = NULL; |
e6dcd2dc CM |
83 | struct rb_node *test; |
84 | struct btrfs_ordered_extent *entry; | |
85 | struct btrfs_ordered_extent *prev_entry = NULL; | |
dc17ff8f | 86 | |
d397712b | 87 | while (n) { |
e6dcd2dc | 88 | entry = rb_entry(n, struct btrfs_ordered_extent, rb_node); |
dc17ff8f CM |
89 | prev = n; |
90 | prev_entry = entry; | |
dc17ff8f | 91 | |
e6dcd2dc | 92 | if (file_offset < entry->file_offset) |
dc17ff8f | 93 | n = n->rb_left; |
e6dcd2dc | 94 | else if (file_offset >= entry_end(entry)) |
dc17ff8f CM |
95 | n = n->rb_right; |
96 | else | |
97 | return n; | |
98 | } | |
99 | if (!prev_ret) | |
100 | return NULL; | |
101 | ||
d397712b | 102 | while (prev && file_offset >= entry_end(prev_entry)) { |
e6dcd2dc CM |
103 | test = rb_next(prev); |
104 | if (!test) | |
105 | break; | |
106 | prev_entry = rb_entry(test, struct btrfs_ordered_extent, | |
107 | rb_node); | |
108 | if (file_offset < entry_end(prev_entry)) | |
109 | break; | |
110 | ||
111 | prev = test; | |
112 | } | |
113 | if (prev) | |
114 | prev_entry = rb_entry(prev, struct btrfs_ordered_extent, | |
115 | rb_node); | |
d397712b | 116 | while (prev && file_offset < entry_end(prev_entry)) { |
e6dcd2dc CM |
117 | test = rb_prev(prev); |
118 | if (!test) | |
119 | break; | |
120 | prev_entry = rb_entry(test, struct btrfs_ordered_extent, | |
121 | rb_node); | |
122 | prev = test; | |
dc17ff8f CM |
123 | } |
124 | *prev_ret = prev; | |
125 | return NULL; | |
126 | } | |
127 | ||
d352ac68 CM |
128 | /* |
129 | * helper to check if a given offset is inside a given entry | |
130 | */ | |
e6dcd2dc CM |
131 | static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset) |
132 | { | |
133 | if (file_offset < entry->file_offset || | |
134 | entry->file_offset + entry->len <= file_offset) | |
135 | return 0; | |
136 | return 1; | |
137 | } | |
138 | ||
4b46fce2 JB |
139 | static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset, |
140 | u64 len) | |
141 | { | |
142 | if (file_offset + len <= entry->file_offset || | |
143 | entry->file_offset + entry->len <= file_offset) | |
144 | return 0; | |
145 | return 1; | |
146 | } | |
147 | ||
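/*
 * Editor's note (not part of the original file): both helpers above treat an
 * ordered extent as the half-open byte range [file_offset, file_offset + len).
 * A small worked example with a 4K entry (file_offset = 4096, len = 4096,
 * i.e. bytes 4096..8191):
 *
 *	offset_in_entry(entry, 4096)      == 1
 *	offset_in_entry(entry, 8191)      == 1
 *	offset_in_entry(entry, 8192)      == 0	(one byte past the end)
 *	range_overlaps(entry, 0, 4096)    == 0	([0, 4096) only touches the edge)
 *	range_overlaps(entry, 8000, 1000) == 1
 */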
d352ac68 CM |
148 | /* |
149 | * look for the first ordered struct that has this offset, otherwise
150 | * the first one less than this offset | |
151 | */ | |
e6dcd2dc CM |
152 | static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, |
153 | u64 file_offset) | |
dc17ff8f | 154 | { |
e6dcd2dc | 155 | struct rb_root *root = &tree->tree; |
c87fb6fd | 156 | struct rb_node *prev = NULL; |
dc17ff8f | 157 | struct rb_node *ret; |
e6dcd2dc CM |
158 | struct btrfs_ordered_extent *entry; |
159 | ||
160 | if (tree->last) { | |
161 | entry = rb_entry(tree->last, struct btrfs_ordered_extent, | |
162 | rb_node); | |
163 | if (offset_in_entry(entry, file_offset)) | |
164 | return tree->last; | |
165 | } | |
166 | ret = __tree_search(root, file_offset, &prev); | |
dc17ff8f | 167 | if (!ret) |
e6dcd2dc CM |
168 | ret = prev; |
169 | if (ret) | |
170 | tree->last = ret; | |
dc17ff8f CM |
171 | return ret; |
172 | } | |
173 | ||
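/*
 * Editor's note (not part of the original file): tree_search() caches the
 * last hit in tree->last, so back-to-back lookups that land in the same
 * ordered extent skip the rbtree walk entirely. Because a miss hands back
 * the first entry at a lower offset rather than NULL, callers must re-check
 * the result, as the functions below do. A minimal sketch of that pattern
 * (tree->lock held):
 *
 *	node = tree_search(tree, file_offset);
 *	if (!node)
 *		goto not_found;
 *	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 *	if (!offset_in_entry(entry, file_offset))
 *		goto not_found;
 */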
eb84ae03 CM |
174 | /* allocate and add a new ordered_extent into the per-inode tree. |
175 | * file_offset is the logical offset in the file | |
176 | * | |
177 | * start is the disk block number of an extent already reserved in the | |
178 | * extent allocation tree | |
179 | * | |
180 | * len is the length of the extent | |
181 | * | |
eb84ae03 CM |
182 | * The tree is given a single reference on the ordered extent that was |
183 | * inserted. | |
184 | */ | |
4b46fce2 JB |
185 | static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, |
186 | u64 start, u64 len, u64 disk_len, | |
261507a0 | 187 | int type, int dio, int compress_type) |
dc17ff8f | 188 | { |
199c2a9c | 189 | struct btrfs_root *root = BTRFS_I(inode)->root; |
dc17ff8f | 190 | struct btrfs_ordered_inode_tree *tree; |
e6dcd2dc CM |
191 | struct rb_node *node; |
192 | struct btrfs_ordered_extent *entry; | |
dc17ff8f | 193 | |
e6dcd2dc | 194 | tree = &BTRFS_I(inode)->ordered_tree; |
6352b91d | 195 | entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS); |
dc17ff8f CM |
196 | if (!entry) |
197 | return -ENOMEM; | |
198 | ||
e6dcd2dc CM |
199 | entry->file_offset = file_offset; |
200 | entry->start = start; | |
201 | entry->len = len; | |
c8b97818 | 202 | entry->disk_len = disk_len; |
8b62b72b | 203 | entry->bytes_left = len; |
5fd02043 | 204 | entry->inode = igrab(inode); |
261507a0 | 205 | entry->compress_type = compress_type; |
77cef2ec | 206 | entry->truncated_len = (u64)-1; |
d899e052 | 207 | if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE) |
80ff3856 | 208 | set_bit(type, &entry->flags); |
3eaa2885 | 209 | |
4b46fce2 JB |
210 | if (dio) |
211 | set_bit(BTRFS_ORDERED_DIRECT, &entry->flags); | |
212 | ||
e6dcd2dc CM |
213 | /* one ref for the tree */ |
214 | atomic_set(&entry->refs, 1); | |
215 | init_waitqueue_head(&entry->wait); | |
216 | INIT_LIST_HEAD(&entry->list); | |
3eaa2885 | 217 | INIT_LIST_HEAD(&entry->root_extent_list); |
9afab882 MX |
218 | INIT_LIST_HEAD(&entry->work_list); |
219 | init_completion(&entry->completion); | |
2ab28f32 | 220 | INIT_LIST_HEAD(&entry->log_list); |
50d9aa99 | 221 | INIT_LIST_HEAD(&entry->trans_list); |
dc17ff8f | 222 | |
1abe9b8a | 223 | trace_btrfs_ordered_extent_add(inode, entry); |
224 | ||
5fd02043 | 225 | spin_lock_irq(&tree->lock); |
e6dcd2dc CM |
226 | node = tree_insert(&tree->tree, file_offset, |
227 | &entry->rb_node); | |
43c04fb1 JM |
228 | if (node) |
229 | ordered_data_tree_panic(inode, -EEXIST, file_offset); | |
5fd02043 | 230 | spin_unlock_irq(&tree->lock); |
d397712b | 231 | |
199c2a9c | 232 | spin_lock(&root->ordered_extent_lock); |
3eaa2885 | 233 | list_add_tail(&entry->root_extent_list, |
199c2a9c MX |
234 | &root->ordered_extents); |
235 | root->nr_ordered_extents++; | |
236 | if (root->nr_ordered_extents == 1) { | |
237 | spin_lock(&root->fs_info->ordered_root_lock); | |
238 | BUG_ON(!list_empty(&root->ordered_root)); | |
239 | list_add_tail(&root->ordered_root, | |
240 | &root->fs_info->ordered_roots); | |
241 | spin_unlock(&root->fs_info->ordered_root_lock); | |
242 | } | |
243 | spin_unlock(&root->ordered_extent_lock); | |
3eaa2885 | 244 | |
dc17ff8f CM |
245 | return 0; |
246 | } | |
247 | ||
4b46fce2 JB |
248 | int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, |
249 | u64 start, u64 len, u64 disk_len, int type) | |
250 | { | |
251 | return __btrfs_add_ordered_extent(inode, file_offset, start, len, | |
261507a0 LZ |
252 | disk_len, type, 0, |
253 | BTRFS_COMPRESS_NONE); | |
4b46fce2 JB |
254 | } |
255 | ||
256 | int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, | |
257 | u64 start, u64 len, u64 disk_len, int type) | |
258 | { | |
259 | return __btrfs_add_ordered_extent(inode, file_offset, start, len, | |
261507a0 LZ |
260 | disk_len, type, 1, |
261 | BTRFS_COMPRESS_NONE); | |
262 | } | |
263 | ||
264 | int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset, | |
265 | u64 start, u64 len, u64 disk_len, | |
266 | int type, int compress_type) | |
267 | { | |
268 | return __btrfs_add_ordered_extent(inode, file_offset, start, len, | |
269 | disk_len, type, 0, | |
270 | compress_type); | |
4b46fce2 JB |
271 | } |
272 | ||
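/*
 * Editor's sketch (not part of the original file): how a COW write path
 * typically creates an ordered extent right after reserving disk space.
 * "ins" (the allocator's result) and "num_bytes" are assumed names here;
 * passing type 0 marks a plain data extent (prealloc, nocow and compressed
 * writes pass their respective type bits, and direct IO uses the _dio
 * wrapper). Arguments are: logical file offset, disk bytenr, length in the
 * file, length on disk, type.
 *
 *	ret = btrfs_add_ordered_extent(inode, file_offset, ins.objectid,
 *				       num_bytes, ins.offset, 0);
 *	if (ret)
 *		goto out_reserve;
 */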
eb84ae03 CM |
273 | /* |
274 | * Add a struct btrfs_ordered_sum into the list of checksums to be inserted | |
3edf7d33 CM |
275 | * when an ordered extent is finished. If the list covers more than one |
276 | * ordered extent, it is split across multiples. | |
eb84ae03 | 277 | */ |
143bede5 JM |
278 | void btrfs_add_ordered_sum(struct inode *inode, |
279 | struct btrfs_ordered_extent *entry, | |
280 | struct btrfs_ordered_sum *sum) | |
dc17ff8f | 281 | { |
e6dcd2dc | 282 | struct btrfs_ordered_inode_tree *tree; |
dc17ff8f | 283 | |
e6dcd2dc | 284 | tree = &BTRFS_I(inode)->ordered_tree; |
5fd02043 | 285 | spin_lock_irq(&tree->lock); |
e6dcd2dc | 286 | list_add_tail(&sum->list, &entry->list); |
5fd02043 | 287 | spin_unlock_irq(&tree->lock); |
dc17ff8f CM |
288 | } |
289 | ||
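/*
 * Editor's sketch (not part of the original file): the producer side of the
 * checksum list. In this era the checksumming path (btrfs_csum_one_bio())
 * builds a struct btrfs_ordered_sum describing the disk range it just
 * checksummed (bytenr is the start of that range, len its length in bytes)
 * and attaches it here, so the csums can be written into the csum tree once
 * the ordered extent finishes. Field names follow the consumer in
 * btrfs_find_ordered_sum() below; allocation details are omitted.
 *
 *	sums->bytenr = disk_bytenr;
 *	sums->len = bio_bytes;
 *	... fill sums->sums[] with one u32 csum per sector ...
 *	btrfs_add_ordered_sum(inode, ordered, sums);
 */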
163cf09c CM |
290 | /* |
291 | * this is used to account for finished IO across a given range | |
292 | * of the file. The IO may span ordered extents. If | |
293 | * a given ordered_extent is completely done, 1 is returned, otherwise | |
294 | * 0. | |
295 | * | |
296 | * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used | |
297 | * to make sure this function only returns 1 once for a given ordered extent. | |
298 | * | |
299 | * file_offset is updated to one byte past the range that is recorded as | |
300 | * complete. This allows you to walk forward in the file. | |
301 | */ | |
302 | int btrfs_dec_test_first_ordered_pending(struct inode *inode, | |
303 | struct btrfs_ordered_extent **cached, | |
5fd02043 | 304 | u64 *file_offset, u64 io_size, int uptodate) |
163cf09c CM |
305 | { |
306 | struct btrfs_ordered_inode_tree *tree; | |
307 | struct rb_node *node; | |
308 | struct btrfs_ordered_extent *entry = NULL; | |
309 | int ret; | |
5fd02043 | 310 | unsigned long flags; |
163cf09c CM |
311 | u64 dec_end; |
312 | u64 dec_start; | |
313 | u64 to_dec; | |
314 | ||
315 | tree = &BTRFS_I(inode)->ordered_tree; | |
5fd02043 | 316 | spin_lock_irqsave(&tree->lock, flags); |
163cf09c CM |
317 | node = tree_search(tree, *file_offset); |
318 | if (!node) { | |
319 | ret = 1; | |
320 | goto out; | |
321 | } | |
322 | ||
323 | entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); | |
324 | if (!offset_in_entry(entry, *file_offset)) { | |
325 | ret = 1; | |
326 | goto out; | |
327 | } | |
328 | ||
329 | dec_start = max(*file_offset, entry->file_offset); | |
330 | dec_end = min(*file_offset + io_size, entry->file_offset + | |
331 | entry->len); | |
332 | *file_offset = dec_end; | |
333 | if (dec_start > dec_end) { | |
efe120a0 FH |
334 | btrfs_crit(BTRFS_I(inode)->root->fs_info, |
335 | "bad ordering dec_start %llu end %llu", dec_start, dec_end); | |
163cf09c CM |
336 | } |
337 | to_dec = dec_end - dec_start; | |
338 | if (to_dec > entry->bytes_left) { | |
efe120a0 FH |
339 | btrfs_crit(BTRFS_I(inode)->root->fs_info, |
340 | "bad ordered accounting left %llu size %llu", | |
341 | entry->bytes_left, to_dec); | |
163cf09c CM |
342 | } |
343 | entry->bytes_left -= to_dec; | |
5fd02043 JB |
344 | if (!uptodate) |
345 | set_bit(BTRFS_ORDERED_IOERR, &entry->flags); | |
346 | ||
af7a6509 | 347 | if (entry->bytes_left == 0) { |
163cf09c | 348 | ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); |
a83342aa DS |
349 | /* |
350 | * Implicit memory barrier after test_and_set_bit | |
351 | */ | |
af7a6509 MX |
352 | if (waitqueue_active(&entry->wait)) |
353 | wake_up(&entry->wait); | |
354 | } else { | |
163cf09c | 355 | ret = 1; |
af7a6509 | 356 | } |
163cf09c CM |
357 | out: |
358 | if (!ret && cached && entry) { | |
359 | *cached = entry; | |
360 | atomic_inc(&entry->refs); | |
361 | } | |
5fd02043 | 362 | spin_unlock_irqrestore(&tree->lock, flags); |
163cf09c CM |
363 | return ret == 0; |
364 | } | |
365 | ||
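/*
 * Editor's sketch (not part of the original file): because *file_offset is
 * pushed past whatever was just accounted, a completion handler can walk
 * forward across every ordered extent touched by one piece of IO, in the
 * spirit of the direct-IO write completion path. The sketch assumes the
 * whole range is covered by ordered extents (which the write path set up),
 * and leaves the actual "finish the extent" work as a placeholder.
 *
 *	u64 cur = start;
 *
 *	while (cur < start + bytes) {
 *		struct btrfs_ordered_extent *ordered = NULL;
 *
 *		if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
 *				&cur, start + bytes - cur, uptodate)) {
 *			... this call retired the whole ordered extent:
 *			    finish it, then drop the reference handed back
 *			    in "ordered" ...
 *			btrfs_put_ordered_extent(ordered);
 *		}
 *	}
 */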
eb84ae03 CM |
366 | /* |
367 | * this is used to account for finished IO across a given range | |
368 | * of the file. The IO should not span ordered extents. If | |
369 | * a given ordered_extent is completely done, 1 is returned, otherwise | |
370 | * 0. | |
371 | * | |
372 | * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used | |
373 | * to make sure this function only returns 1 once for a given ordered extent. | |
374 | */ | |
e6dcd2dc | 375 | int btrfs_dec_test_ordered_pending(struct inode *inode, |
5a1a3df1 | 376 | struct btrfs_ordered_extent **cached, |
5fd02043 | 377 | u64 file_offset, u64 io_size, int uptodate) |
dc17ff8f | 378 | { |
e6dcd2dc | 379 | struct btrfs_ordered_inode_tree *tree; |
dc17ff8f | 380 | struct rb_node *node; |
5a1a3df1 | 381 | struct btrfs_ordered_extent *entry = NULL; |
5fd02043 | 382 | unsigned long flags; |
e6dcd2dc CM |
383 | int ret; |
384 | ||
385 | tree = &BTRFS_I(inode)->ordered_tree; | |
5fd02043 JB |
386 | spin_lock_irqsave(&tree->lock, flags); |
387 | if (cached && *cached) { | |
388 | entry = *cached; | |
389 | goto have_entry; | |
390 | } | |
391 | ||
e6dcd2dc | 392 | node = tree_search(tree, file_offset); |
dc17ff8f | 393 | if (!node) { |
e6dcd2dc CM |
394 | ret = 1; |
395 | goto out; | |
dc17ff8f CM |
396 | } |
397 | ||
e6dcd2dc | 398 | entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); |
5fd02043 | 399 | have_entry: |
e6dcd2dc CM |
400 | if (!offset_in_entry(entry, file_offset)) { |
401 | ret = 1; | |
402 | goto out; | |
dc17ff8f | 403 | } |
e6dcd2dc | 404 | |
8b62b72b | 405 | if (io_size > entry->bytes_left) { |
efe120a0 FH |
406 | btrfs_crit(BTRFS_I(inode)->root->fs_info, |
407 | "bad ordered accounting left %llu size %llu", | |
c1c9ff7c | 408 | entry->bytes_left, io_size); |
8b62b72b CM |
409 | } |
410 | entry->bytes_left -= io_size; | |
5fd02043 JB |
411 | if (!uptodate) |
412 | set_bit(BTRFS_ORDERED_IOERR, &entry->flags); | |
413 | ||
af7a6509 | 414 | if (entry->bytes_left == 0) { |
e6dcd2dc | 415 | ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); |
a83342aa DS |
416 | /* |
417 | * Implicit memory barrier after test_and_set_bit | |
418 | */ | |
af7a6509 MX |
419 | if (waitqueue_active(&entry->wait)) |
420 | wake_up(&entry->wait); | |
421 | } else { | |
8b62b72b | 422 | ret = 1; |
af7a6509 | 423 | } |
e6dcd2dc | 424 | out: |
5a1a3df1 JB |
425 | if (!ret && cached && entry) { |
426 | *cached = entry; | |
427 | atomic_inc(&entry->refs); | |
428 | } | |
5fd02043 | 429 | spin_unlock_irqrestore(&tree->lock, flags); |
e6dcd2dc CM |
430 | return ret == 0; |
431 | } | |
dc17ff8f | 432 | |
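/*
 * Editor's sketch (not part of the original file): typical use from the
 * buffered writeback completion hook. Every finished page range decrements
 * bytes_left on the covering ordered extent; only the call that drives it to
 * zero gets back a referenced entry and goes on to finish the extent
 * (file extent item, csums, i_size update), everyone else just returns.
 *
 *	struct btrfs_ordered_extent *ordered = NULL;
 *
 *	if (btrfs_dec_test_ordered_pending(inode, &ordered, page_start,
 *					   page_end - page_start + 1,
 *					   uptodate)) {
 *		... finish the ordered extent, then drop the reference
 *		    returned in "ordered" ...
 *	}
 */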
2ab28f32 | 433 | /* Needs to either be called under a log transaction or the log_mutex */ |
827463c4 | 434 | void btrfs_get_logged_extents(struct inode *inode, |
0870295b FM |
435 | struct list_head *logged_list, |
436 | const loff_t start, | |
437 | const loff_t end) | |
2ab28f32 JB |
438 | { |
439 | struct btrfs_ordered_inode_tree *tree; | |
440 | struct btrfs_ordered_extent *ordered; | |
441 | struct rb_node *n; | |
0870295b | 442 | struct rb_node *prev; |
2ab28f32 JB |
443 | |
444 | tree = &BTRFS_I(inode)->ordered_tree; | |
445 | spin_lock_irq(&tree->lock); | |
0870295b FM |
446 | n = __tree_search(&tree->tree, end, &prev); |
447 | if (!n) | |
448 | n = prev; | |
449 | for (; n; n = rb_prev(n)) { | |
2ab28f32 | 450 | ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node); |
0870295b FM |
451 | if (ordered->file_offset > end) |
452 | continue; | |
453 | if (entry_end(ordered) <= start) | |
454 | break; | |
4d884fce | 455 | if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags)) |
50d9aa99 | 456 | continue; |
0870295b | 457 | list_add(&ordered->log_list, logged_list); |
827463c4 | 458 | atomic_inc(&ordered->refs); |
2ab28f32 JB |
459 | } |
460 | spin_unlock_irq(&tree->lock); | |
461 | } | |
462 | ||
827463c4 MX |
463 | void btrfs_put_logged_extents(struct list_head *logged_list) |
464 | { | |
465 | struct btrfs_ordered_extent *ordered; | |
466 | ||
467 | while (!list_empty(logged_list)) { | |
468 | ordered = list_first_entry(logged_list, | |
469 | struct btrfs_ordered_extent, | |
470 | log_list); | |
471 | list_del_init(&ordered->log_list); | |
472 | btrfs_put_ordered_extent(ordered); | |
473 | } | |
474 | } | |
475 | ||
476 | void btrfs_submit_logged_extents(struct list_head *logged_list, | |
477 | struct btrfs_root *log) | |
478 | { | |
479 | int index = log->log_transid % 2; | |
480 | ||
481 | spin_lock_irq(&log->log_extents_lock[index]); | |
482 | list_splice_tail(logged_list, &log->logged_list[index]); | |
483 | spin_unlock_irq(&log->log_extents_lock[index]); | |
484 | } | |
485 | ||
50d9aa99 JB |
486 | void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans, |
487 | struct btrfs_root *log, u64 transid) | |
2ab28f32 JB |
488 | { |
489 | struct btrfs_ordered_extent *ordered; | |
490 | int index = transid % 2; | |
491 | ||
492 | spin_lock_irq(&log->log_extents_lock[index]); | |
493 | while (!list_empty(&log->logged_list[index])) { | |
161c3549 | 494 | struct inode *inode; |
2ab28f32 JB |
495 | ordered = list_first_entry(&log->logged_list[index], |
496 | struct btrfs_ordered_extent, | |
497 | log_list); | |
498 | list_del_init(&ordered->log_list); | |
161c3549 | 499 | inode = ordered->inode; |
2ab28f32 | 500 | spin_unlock_irq(&log->log_extents_lock[index]); |
98ce2ded LB |
501 | |
502 | if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) && | |
503 | !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) { | |
98ce2ded LB |
504 | u64 start = ordered->file_offset; |
505 | u64 end = ordered->file_offset + ordered->len - 1; | |
506 | ||
507 | WARN_ON(!inode); | |
508 | filemap_fdatawrite_range(inode->i_mapping, start, end); | |
509 | } | |
2ab28f32 JB |
510 | wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE, |
511 | &ordered->flags)); | |
98ce2ded | 512 | |
7558c8bc | 513 | /* |
161c3549 JB |
514 | * In order to keep us from losing our ordered extent |
515 | * information when committing the transaction we have to make | |
516 | * sure that any logged extents are completed when we go to | |
517 | * commit the transaction. To do this we simply increase the | |
518 | * current transaction's pending_ordered counter and decrement it
519 | * when the ordered extent completes. | |
7558c8bc | 520 | */ |
161c3549 JB |
521 | if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) { |
522 | struct btrfs_ordered_inode_tree *tree; | |
523 | ||
524 | tree = &BTRFS_I(inode)->ordered_tree; | |
525 | spin_lock_irq(&tree->lock); | |
526 | if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) { | |
527 | set_bit(BTRFS_ORDERED_PENDING, &ordered->flags); | |
528 | atomic_inc(&trans->transaction->pending_ordered); | |
529 | } | |
530 | spin_unlock_irq(&tree->lock); | |
531 | } | |
532 | btrfs_put_ordered_extent(ordered); | |
2ab28f32 JB |
533 | spin_lock_irq(&log->log_extents_lock[index]); |
534 | } | |
535 | spin_unlock_irq(&log->log_extents_lock[index]); | |
536 | } | |
537 | ||
538 | void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid) | |
539 | { | |
540 | struct btrfs_ordered_extent *ordered; | |
541 | int index = transid % 2; | |
542 | ||
543 | spin_lock_irq(&log->log_extents_lock[index]); | |
544 | while (!list_empty(&log->logged_list[index])) { | |
545 | ordered = list_first_entry(&log->logged_list[index], | |
546 | struct btrfs_ordered_extent, | |
547 | log_list); | |
548 | list_del_init(&ordered->log_list); | |
549 | spin_unlock_irq(&log->log_extents_lock[index]); | |
550 | btrfs_put_ordered_extent(ordered); | |
551 | spin_lock_irq(&log->log_extents_lock[index]); | |
552 | } | |
553 | spin_unlock_irq(&log->log_extents_lock[index]); | |
554 | } | |
555 | ||
eb84ae03 CM |
556 | /* |
557 | * used to drop a reference on an ordered extent. This will free | |
558 | * the extent if the last reference is dropped | |
559 | */ | |
143bede5 | 560 | void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) |
e6dcd2dc | 561 | { |
ba1da2f4 CM |
562 | struct list_head *cur; |
563 | struct btrfs_ordered_sum *sum; | |
564 | ||
1abe9b8a | 565 | trace_btrfs_ordered_extent_put(entry->inode, entry); |
566 | ||
ba1da2f4 | 567 | if (atomic_dec_and_test(&entry->refs)) { |
61de718f FM |
568 | ASSERT(list_empty(&entry->log_list)); |
569 | ASSERT(list_empty(&entry->trans_list)); | |
570 | ASSERT(list_empty(&entry->root_extent_list)); | |
571 | ASSERT(RB_EMPTY_NODE(&entry->rb_node)); | |
5fd02043 JB |
572 | if (entry->inode) |
573 | btrfs_add_delayed_iput(entry->inode); | |
d397712b | 574 | while (!list_empty(&entry->list)) { |
ba1da2f4 CM |
575 | cur = entry->list.next; |
576 | sum = list_entry(cur, struct btrfs_ordered_sum, list); | |
577 | list_del(&sum->list); | |
578 | kfree(sum); | |
579 | } | |
6352b91d | 580 | kmem_cache_free(btrfs_ordered_extent_cache, entry); |
ba1da2f4 | 581 | } |
dc17ff8f | 582 | } |
cee36a03 | 583 | |
eb84ae03 CM |
584 | /* |
585 | * remove an ordered extent from the tree. No references are dropped | |
5fd02043 | 586 | * and waiters are woken up. |
eb84ae03 | 587 | */ |
5fd02043 JB |
588 | void btrfs_remove_ordered_extent(struct inode *inode, |
589 | struct btrfs_ordered_extent *entry) | |
cee36a03 | 590 | { |
e6dcd2dc | 591 | struct btrfs_ordered_inode_tree *tree; |
287a0ab9 | 592 | struct btrfs_root *root = BTRFS_I(inode)->root; |
cee36a03 | 593 | struct rb_node *node; |
161c3549 | 594 | bool dec_pending_ordered = false; |
cee36a03 | 595 | |
e6dcd2dc | 596 | tree = &BTRFS_I(inode)->ordered_tree; |
5fd02043 | 597 | spin_lock_irq(&tree->lock); |
e6dcd2dc | 598 | node = &entry->rb_node; |
cee36a03 | 599 | rb_erase(node, &tree->tree); |
61de718f | 600 | RB_CLEAR_NODE(node); |
1b8e7e45 FDBM |
601 | if (tree->last == node) |
602 | tree->last = NULL; | |
e6dcd2dc | 603 | set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags); |
161c3549 JB |
604 | if (test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags)) |
605 | dec_pending_ordered = true; | |
5fd02043 | 606 | spin_unlock_irq(&tree->lock); |
3eaa2885 | 607 | |
161c3549 JB |
608 | /* |
609 | * The current running transaction is waiting on us, we need to let it | |
610 | * know that we're complete and wake it up. | |
611 | */ | |
612 | if (dec_pending_ordered) { | |
613 | struct btrfs_transaction *trans; | |
614 | ||
615 | /* | |
616 | * The checks for trans are just a formality, it should be set, | |
617 | * but if it isn't we don't want to deref/assert under the spin | |
618 | * lock, so be nice and check if trans is set, but ASSERT() so | |
619 | * if it isn't set a developer will notice. | |
620 | */ | |
621 | spin_lock(&root->fs_info->trans_lock); | |
622 | trans = root->fs_info->running_transaction; | |
623 | if (trans) | |
624 | atomic_inc(&trans->use_count); | |
625 | spin_unlock(&root->fs_info->trans_lock); | |
626 | ||
627 | ASSERT(trans); | |
628 | if (trans) { | |
629 | if (atomic_dec_and_test(&trans->pending_ordered)) | |
630 | wake_up(&trans->pending_wait); | |
631 | btrfs_put_transaction(trans); | |
632 | } | |
633 | } | |
634 | ||
199c2a9c | 635 | spin_lock(&root->ordered_extent_lock); |
3eaa2885 | 636 | list_del_init(&entry->root_extent_list); |
199c2a9c | 637 | root->nr_ordered_extents--; |
5a3f23d5 | 638 | |
1abe9b8a | 639 | trace_btrfs_ordered_extent_remove(inode, entry); |
640 | ||
199c2a9c MX |
641 | if (!root->nr_ordered_extents) { |
642 | spin_lock(&root->fs_info->ordered_root_lock); | |
643 | BUG_ON(list_empty(&root->ordered_root)); | |
644 | list_del_init(&root->ordered_root); | |
645 | spin_unlock(&root->fs_info->ordered_root_lock); | |
646 | } | |
647 | spin_unlock(&root->ordered_extent_lock); | |
e6dcd2dc | 648 | wake_up(&entry->wait); |
cee36a03 CM |
649 | } |
650 | ||
d458b054 | 651 | static void btrfs_run_ordered_extent_work(struct btrfs_work *work) |
9afab882 MX |
652 | { |
653 | struct btrfs_ordered_extent *ordered; | |
654 | ||
655 | ordered = container_of(work, struct btrfs_ordered_extent, flush_work); | |
656 | btrfs_start_ordered_extent(ordered->inode, ordered, 1); | |
657 | complete(&ordered->completion); | |
658 | } | |
659 | ||
d352ac68 CM |
660 | /* |
661 | * wait for all the ordered extents in a root. This is done when balancing | |
662 | * space between drives. | |
663 | */ | |
578def7c FM |
664 | int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, |
665 | const u64 range_start, const u64 range_len) | |
3eaa2885 | 666 | { |
578def7c FM |
667 | LIST_HEAD(splice); |
668 | LIST_HEAD(skipped); | |
669 | LIST_HEAD(works); | |
9afab882 | 670 | struct btrfs_ordered_extent *ordered, *next; |
b0244199 | 671 | int count = 0; |
578def7c | 672 | const u64 range_end = range_start + range_len; |
3eaa2885 | 673 | |
31f3d255 | 674 | mutex_lock(&root->ordered_extent_mutex); |
199c2a9c MX |
675 | spin_lock(&root->ordered_extent_lock); |
676 | list_splice_init(&root->ordered_extents, &splice); | |
b0244199 | 677 | while (!list_empty(&splice) && nr) { |
199c2a9c MX |
678 | ordered = list_first_entry(&splice, struct btrfs_ordered_extent, |
679 | root_extent_list); | |
578def7c FM |
680 | |
681 | if (range_end <= ordered->start || | |
682 | ordered->start + ordered->disk_len <= range_start) { | |
683 | list_move_tail(&ordered->root_extent_list, &skipped); | |
684 | cond_resched_lock(&root->ordered_extent_lock); | |
685 | continue; | |
686 | } | |
687 | ||
199c2a9c MX |
688 | list_move_tail(&ordered->root_extent_list, |
689 | &root->ordered_extents); | |
199c2a9c MX |
690 | atomic_inc(&ordered->refs); |
691 | spin_unlock(&root->ordered_extent_lock); | |
3eaa2885 | 692 | |
a44903ab | 693 | btrfs_init_work(&ordered->flush_work, |
9e0af237 | 694 | btrfs_flush_delalloc_helper, |
a44903ab | 695 | btrfs_run_ordered_extent_work, NULL, NULL); |
199c2a9c | 696 | list_add_tail(&ordered->work_list, &works); |
a44903ab QW |
697 | btrfs_queue_work(root->fs_info->flush_workers, |
698 | &ordered->flush_work); | |
3eaa2885 | 699 | |
9afab882 | 700 | cond_resched(); |
199c2a9c | 701 | spin_lock(&root->ordered_extent_lock); |
b0244199 MX |
702 | if (nr != -1) |
703 | nr--; | |
704 | count++; | |
3eaa2885 | 705 | } |
578def7c | 706 | list_splice_tail(&skipped, &root->ordered_extents); |
b0244199 | 707 | list_splice_tail(&splice, &root->ordered_extents); |
199c2a9c | 708 | spin_unlock(&root->ordered_extent_lock); |
9afab882 MX |
709 | |
710 | list_for_each_entry_safe(ordered, next, &works, work_list) { | |
711 | list_del_init(&ordered->work_list); | |
712 | wait_for_completion(&ordered->completion); | |
9afab882 | 713 | btrfs_put_ordered_extent(ordered); |
9afab882 MX |
714 | cond_resched(); |
715 | } | |
31f3d255 | 716 | mutex_unlock(&root->ordered_extent_mutex); |
b0244199 MX |
717 | |
718 | return count; | |
3eaa2885 CM |
719 | } |
720 | ||
f0e9b7d6 | 721 | int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, |
578def7c | 722 | const u64 range_start, const u64 range_len) |
199c2a9c MX |
723 | { |
724 | struct btrfs_root *root; | |
725 | struct list_head splice; | |
b0244199 | 726 | int done; |
f0e9b7d6 | 727 | int total_done = 0; |
199c2a9c MX |
728 | |
729 | INIT_LIST_HEAD(&splice); | |
730 | ||
8b9d83cd | 731 | mutex_lock(&fs_info->ordered_operations_mutex); |
199c2a9c MX |
732 | spin_lock(&fs_info->ordered_root_lock); |
733 | list_splice_init(&fs_info->ordered_roots, &splice); | |
b0244199 | 734 | while (!list_empty(&splice) && nr) { |
199c2a9c MX |
735 | root = list_first_entry(&splice, struct btrfs_root, |
736 | ordered_root); | |
737 | root = btrfs_grab_fs_root(root); | |
738 | BUG_ON(!root); | |
739 | list_move_tail(&root->ordered_root, | |
740 | &fs_info->ordered_roots); | |
741 | spin_unlock(&fs_info->ordered_root_lock); | |
742 | ||
578def7c FM |
743 | done = btrfs_wait_ordered_extents(root, nr, |
744 | range_start, range_len); | |
199c2a9c | 745 | btrfs_put_fs_root(root); |
f0e9b7d6 | 746 | total_done += done; |
199c2a9c MX |
747 | |
748 | spin_lock(&fs_info->ordered_root_lock); | |
b0244199 MX |
749 | if (nr != -1) { |
750 | nr -= done; | |
751 | WARN_ON(nr < 0); | |
752 | } | |
199c2a9c | 753 | } |
931aa877 | 754 | list_splice_tail(&splice, &fs_info->ordered_roots); |
199c2a9c | 755 | spin_unlock(&fs_info->ordered_root_lock); |
8b9d83cd | 756 | mutex_unlock(&fs_info->ordered_operations_mutex); |
f0e9b7d6 FM |
757 | |
758 | return total_done; | |
199c2a9c MX |
759 | } |
760 | ||
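/*
 * Editor's note (not part of the original file): callers that need all dirty
 * data on disk (sync-style and balance-style operations) pass nr == -1 and
 * the full byte range, for example:
 *
 *	btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
 *
 * Throttled flushers instead pass a small nr and use the returned count of
 * ordered extents actually waited on to decide whether to keep going.
 */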
eb84ae03 CM |
761 | /* |
762 | * Used to start IO or wait for a given ordered extent to finish. | |
763 | * | |
764 | * If wait is one, this effectively waits on page writeback for all the pages | |
765 | * in the extent, and it waits on the io completion code to insert | |
766 | * metadata into the btree corresponding to the extent | |
767 | */ | |
768 | void btrfs_start_ordered_extent(struct inode *inode, | |
769 | struct btrfs_ordered_extent *entry, | |
770 | int wait) | |
e6dcd2dc CM |
771 | { |
772 | u64 start = entry->file_offset; | |
773 | u64 end = start + entry->len - 1; | |
e1b81e67 | 774 | |
1abe9b8a | 775 | trace_btrfs_ordered_extent_start(inode, entry); |
776 | ||
eb84ae03 CM |
777 | /* |
778 | * pages in the range can be dirty, clean or writeback. We | |
779 | * start IO on any dirty ones so the wait doesn't stall waiting | |
b2570314 | 780 | * for the flusher thread to find them |
eb84ae03 | 781 | */ |
4b46fce2 JB |
782 | if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags)) |
783 | filemap_fdatawrite_range(inode->i_mapping, start, end); | |
c8b97818 | 784 | if (wait) { |
e6dcd2dc CM |
785 | wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, |
786 | &entry->flags)); | |
c8b97818 | 787 | } |
e6dcd2dc | 788 | } |
cee36a03 | 789 | |
eb84ae03 CM |
790 | /* |
791 | * Used to wait on ordered extents across a large range of bytes. | |
792 | */ | |
0ef8b726 | 793 | int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) |
e6dcd2dc | 794 | { |
0ef8b726 | 795 | int ret = 0; |
28aeeac1 | 796 | int ret_wb = 0; |
e6dcd2dc | 797 | u64 end; |
e5a2217e | 798 | u64 orig_end; |
e6dcd2dc | 799 | struct btrfs_ordered_extent *ordered; |
e5a2217e CM |
800 | |
801 | if (start + len < start) { | |
f421950f | 802 | orig_end = INT_LIMIT(loff_t); |
e5a2217e CM |
803 | } else { |
804 | orig_end = start + len - 1; | |
f421950f CM |
805 | if (orig_end > INT_LIMIT(loff_t)) |
806 | orig_end = INT_LIMIT(loff_t); | |
e5a2217e | 807 | } |
551ebb2d | 808 | |
e5a2217e CM |
809 | /* start IO across the range first to instantiate any delalloc |
810 | * extents | |
811 | */ | |
728404da | 812 | ret = btrfs_fdatawrite_range(inode, start, orig_end); |
0ef8b726 JB |
813 | if (ret) |
814 | return ret; | |
728404da | 815 | |
28aeeac1 FM |
816 | /* |
817 | * If we have a writeback error don't return immediately. Wait first | |
818 | * for any ordered extents that haven't completed yet. This is to make | |
819 | * sure no one can dirty the same page ranges and call writepages() | |
820 | * before the ordered extents complete - to avoid failures (-EEXIST) | |
821 | * when adding the new ordered extents to the ordered tree. | |
822 | */ | |
823 | ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end); | |
e5a2217e | 824 | |
f421950f | 825 | end = orig_end; |
d397712b | 826 | while (1) { |
e6dcd2dc | 827 | ordered = btrfs_lookup_first_ordered_extent(inode, end); |
d397712b | 828 | if (!ordered) |
e6dcd2dc | 829 | break; |
e5a2217e | 830 | if (ordered->file_offset > orig_end) { |
e6dcd2dc CM |
831 | btrfs_put_ordered_extent(ordered); |
832 | break; | |
833 | } | |
b52abf1e | 834 | if (ordered->file_offset + ordered->len <= start) { |
e6dcd2dc CM |
835 | btrfs_put_ordered_extent(ordered); |
836 | break; | |
837 | } | |
e5a2217e | 838 | btrfs_start_ordered_extent(inode, ordered, 1); |
e6dcd2dc | 839 | end = ordered->file_offset; |
0ef8b726 JB |
840 | if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) |
841 | ret = -EIO; | |
e6dcd2dc | 842 | btrfs_put_ordered_extent(ordered); |
0ef8b726 | 843 | if (ret || end == 0 || end == start) |
e6dcd2dc CM |
844 | break; |
845 | end--; | |
846 | } | |
28aeeac1 | 847 | return ret_wb ? ret_wb : ret; |
cee36a03 CM |
848 | } |
849 | ||
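/*
 * Editor's sketch (not part of the original file): fsync-style callers wait
 * on a byte range before logging or committing, and must check the return
 * value because an IO error on any ordered extent in the range (or a
 * writeback error) is folded into it:
 *
 *	ret = btrfs_wait_ordered_range(inode, start, end - start + 1);
 *	if (ret)
 *		return ret;
 */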
eb84ae03 CM |
850 | /* |
851 | * find an ordered extent corresponding to file_offset. return NULL if | |
852 | * nothing is found, otherwise take a reference on the extent and return it | |
853 | */ | |
e6dcd2dc CM |
854 | struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, |
855 | u64 file_offset) | |
856 | { | |
857 | struct btrfs_ordered_inode_tree *tree; | |
858 | struct rb_node *node; | |
859 | struct btrfs_ordered_extent *entry = NULL; | |
860 | ||
861 | tree = &BTRFS_I(inode)->ordered_tree; | |
5fd02043 | 862 | spin_lock_irq(&tree->lock); |
e6dcd2dc CM |
863 | node = tree_search(tree, file_offset); |
864 | if (!node) | |
865 | goto out; | |
866 | ||
867 | entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); | |
868 | if (!offset_in_entry(entry, file_offset)) | |
869 | entry = NULL; | |
870 | if (entry) | |
871 | atomic_inc(&entry->refs); | |
872 | out: | |
5fd02043 | 873 | spin_unlock_irq(&tree->lock); |
e6dcd2dc CM |
874 | return entry; |
875 | } | |
876 | ||
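/*
 * Editor's sketch (not part of the original file): a successful lookup hands
 * the caller its own reference, so every hit must be balanced with
 * btrfs_put_ordered_extent(). A common "is this offset still under ordered
 * IO?" pattern (the 1 asks btrfs_start_ordered_extent() to wait for it):
 *
 *	struct btrfs_ordered_extent *ordered;
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, pos);
 *	if (ordered) {
 *		btrfs_start_ordered_extent(inode, ordered, 1);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */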
4b46fce2 JB |
877 | /* Since the DIO code tries to lock a wide area we need to look for any ordered |
878 | * extents that exist in the range, rather than just the start of the range. | |
879 | */ | |
880 | struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode, | |
881 | u64 file_offset, | |
882 | u64 len) | |
883 | { | |
884 | struct btrfs_ordered_inode_tree *tree; | |
885 | struct rb_node *node; | |
886 | struct btrfs_ordered_extent *entry = NULL; | |
887 | ||
888 | tree = &BTRFS_I(inode)->ordered_tree; | |
5fd02043 | 889 | spin_lock_irq(&tree->lock); |
4b46fce2 JB |
890 | node = tree_search(tree, file_offset); |
891 | if (!node) { | |
892 | node = tree_search(tree, file_offset + len); | |
893 | if (!node) | |
894 | goto out; | |
895 | } | |
896 | ||
897 | while (1) { | |
898 | entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); | |
899 | if (range_overlaps(entry, file_offset, len)) | |
900 | break; | |
901 | ||
902 | if (entry->file_offset >= file_offset + len) { | |
903 | entry = NULL; | |
904 | break; | |
905 | } | |
906 | entry = NULL; | |
907 | node = rb_next(node); | |
908 | if (!node) | |
909 | break; | |
910 | } | |
911 | out: | |
912 | if (entry) | |
913 | atomic_inc(&entry->refs); | |
5fd02043 | 914 | spin_unlock_irq(&tree->lock); |
4b46fce2 JB |
915 | return entry; |
916 | } | |
917 | ||
b659ef02 FM |
918 | bool btrfs_have_ordered_extents_in_range(struct inode *inode, |
919 | u64 file_offset, | |
920 | u64 len) | |
921 | { | |
922 | struct btrfs_ordered_extent *oe; | |
923 | ||
924 | oe = btrfs_lookup_ordered_range(inode, file_offset, len); | |
925 | if (oe) { | |
926 | btrfs_put_ordered_extent(oe); | |
927 | return true; | |
928 | } | |
929 | return false; | |
930 | } | |
931 | ||
eb84ae03 CM |
932 | /* |
933 | * lookup and return any extent before 'file_offset'. NULL is returned | |
934 | * if none is found | |
935 | */ | |
e6dcd2dc | 936 | struct btrfs_ordered_extent * |
d397712b | 937 | btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset) |
e6dcd2dc CM |
938 | { |
939 | struct btrfs_ordered_inode_tree *tree; | |
940 | struct rb_node *node; | |
941 | struct btrfs_ordered_extent *entry = NULL; | |
942 | ||
943 | tree = &BTRFS_I(inode)->ordered_tree; | |
5fd02043 | 944 | spin_lock_irq(&tree->lock); |
e6dcd2dc CM |
945 | node = tree_search(tree, file_offset); |
946 | if (!node) | |
947 | goto out; | |
948 | ||
949 | entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); | |
950 | atomic_inc(&entry->refs); | |
951 | out: | |
5fd02043 | 952 | spin_unlock_irq(&tree->lock); |
e6dcd2dc | 953 | return entry; |
81d7ed29 | 954 | } |
dbe674a9 | 955 | |
eb84ae03 CM |
956 | /* |
957 | * After an extent is done, call this to conditionally update the on disk | |
958 | * i_size. i_size is updated to cover any fully written part of the file. | |
959 | */ | |
c2167754 | 960 | int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, |
dbe674a9 CM |
961 | struct btrfs_ordered_extent *ordered) |
962 | { | |
963 | struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; | |
dbe674a9 CM |
964 | u64 disk_i_size; |
965 | u64 new_i_size; | |
c2167754 | 966 | u64 i_size = i_size_read(inode); |
dbe674a9 | 967 | struct rb_node *node; |
c2167754 | 968 | struct rb_node *prev = NULL; |
dbe674a9 | 969 | struct btrfs_ordered_extent *test; |
c2167754 | 970 | int ret = 1; |
c0d2f610 | 971 | u64 orig_offset = offset; |
c2167754 | 972 | |
77cef2ec JB |
973 | spin_lock_irq(&tree->lock); |
974 | if (ordered) { | |
c2167754 | 975 | offset = entry_end(ordered); |
77cef2ec JB |
976 | if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags)) |
977 | offset = min(offset, | |
978 | ordered->file_offset + | |
979 | ordered->truncated_len); | |
980 | } else { | |
a038fab0 | 981 | offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize); |
77cef2ec | 982 | } |
dbe674a9 CM |
983 | disk_i_size = BTRFS_I(inode)->disk_i_size; |
984 | ||
c2167754 YZ |
985 | /* truncate file */ |
986 | if (disk_i_size > i_size) { | |
c0d2f610 | 987 | BTRFS_I(inode)->disk_i_size = orig_offset; |
c2167754 YZ |
988 | ret = 0; |
989 | goto out; | |
990 | } | |
991 | ||
dbe674a9 CM |
992 | /* |
993 | * if the disk i_size is already at the inode->i_size, or | |
994 | * this ordered extent is inside the disk i_size, we're done | |
995 | */ | |
5d1f4020 JB |
996 | if (disk_i_size == i_size) |
997 | goto out; | |
998 | ||
999 | /* | |
1000 | * We still need to update disk_i_size if outstanding_isize is greater | |
1001 | * than disk_i_size. | |
1002 | */ | |
1003 | if (offset <= disk_i_size && | |
1004 | (!ordered || ordered->outstanding_isize <= disk_i_size)) | |
dbe674a9 | 1005 | goto out; |
dbe674a9 | 1006 | |
dbe674a9 CM |
1007 | /* |
1008 | * walk backward from this ordered extent to disk_i_size. | |
1009 | * if we find an ordered extent then we can't update disk i_size | |
1010 | * yet | |
1011 | */ | |
c2167754 YZ |
1012 | if (ordered) { |
1013 | node = rb_prev(&ordered->rb_node); | |
1014 | } else { | |
1015 | prev = tree_search(tree, offset); | |
1016 | /* | |
1017 | * we insert file extents without involving ordered struct, | |
1018 | * so there should be no ordered struct covering this offset
1019 | */ | |
1020 | if (prev) { | |
1021 | test = rb_entry(prev, struct btrfs_ordered_extent, | |
1022 | rb_node); | |
1023 | BUG_ON(offset_in_entry(test, offset)); | |
1024 | } | |
1025 | node = prev; | |
1026 | } | |
5fd02043 | 1027 | for (; node; node = rb_prev(node)) { |
dbe674a9 | 1028 | test = rb_entry(node, struct btrfs_ordered_extent, rb_node); |
5fd02043 | 1029 | |
bb7ab3b9 | 1030 | /* We treat this entry as if it doesn't exist */ |
5fd02043 JB |
1031 | if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags)) |
1032 | continue; | |
dbe674a9 CM |
1033 | if (test->file_offset + test->len <= disk_i_size) |
1034 | break; | |
c2167754 | 1035 | if (test->file_offset >= i_size) |
dbe674a9 | 1036 | break; |
59fe4f41 | 1037 | if (entry_end(test) > disk_i_size) { |
b9a8cc5b MX |
1038 | /* |
1039 | * we don't update disk_i_size now, so record this
1040 | * not-yet-applied i_size here, or we would lose track of the
1041 | * real i_size.
1042 | */ | |
1043 | if (test->outstanding_isize < offset) | |
1044 | test->outstanding_isize = offset; | |
1045 | if (ordered && | |
1046 | ordered->outstanding_isize > | |
1047 | test->outstanding_isize) | |
1048 | test->outstanding_isize = | |
1049 | ordered->outstanding_isize; | |
dbe674a9 | 1050 | goto out; |
5fd02043 | 1051 | } |
dbe674a9 | 1052 | } |
b9a8cc5b | 1053 | new_i_size = min_t(u64, offset, i_size); |
dbe674a9 CM |
1054 | |
1055 | /* | |
b9a8cc5b MX |
1056 | * Some ordered extents may completed before the current one, and |
1057 | * we hold the real i_size in ->outstanding_isize. | |
dbe674a9 | 1058 | */ |
b9a8cc5b MX |
1059 | if (ordered && ordered->outstanding_isize > new_i_size) |
1060 | new_i_size = min_t(u64, ordered->outstanding_isize, i_size); | |
dbe674a9 | 1061 | BTRFS_I(inode)->disk_i_size = new_i_size; |
c2167754 | 1062 | ret = 0; |
dbe674a9 | 1063 | out: |
c2167754 | 1064 | /* |
5fd02043 JB |
1065 | * We need to do this because we can't remove ordered extents until |
1066 | * after the i_disk_size has been updated and then the inode has been | |
1067 | * updated to reflect the change, so we need to tell anybody who finds | |
1068 | * this ordered extent that we've already done all the real work, we | |
1069 | * just haven't completed all the other work. | |
c2167754 YZ |
1070 | */ |
1071 | if (ordered) | |
5fd02043 JB |
1072 | set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags); |
1073 | spin_unlock_irq(&tree->lock); | |
c2167754 | 1074 | return ret; |
dbe674a9 | 1075 | } |
ba1da2f4 | 1076 | |
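/*
 * Editor's worked example (not part of the original file). Assume two
 * back-to-back ordered extents A = [0, 1M) and B = [1M, 2M) with
 * i_size = 2M:
 *
 * - B finishes first. The backward walk from B finds A still pending, so
 *   disk_i_size stays put and 2M is parked in A->outstanding_isize.
 * - A finishes next. Nothing pending remains below it, and because
 *   A->outstanding_isize (2M) exceeds A's own end (1M), disk_i_size jumps
 *   straight to 2M.
 *
 * The effect is that disk_i_size only ever covers contiguous, fully written
 * data, so a crash between the two completions cannot expose a gap of
 * never-written bytes inside the on-disk i_size.
 */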
eb84ae03 CM |
1077 | /* |
1078 | * search the ordered extents for one corresponding to 'offset' and | |
1079 | * try to find a checksum. This is used because we allow pages to | |
1080 | * be reclaimed before their checksum is actually put into the btree | |
1081 | */ | |
d20f7043 | 1082 | int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, |
e4100d98 | 1083 | u32 *sum, int len) |
ba1da2f4 CM |
1084 | { |
1085 | struct btrfs_ordered_sum *ordered_sum; | |
ba1da2f4 CM |
1086 | struct btrfs_ordered_extent *ordered; |
1087 | struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; | |
3edf7d33 CM |
1088 | unsigned long num_sectors; |
1089 | unsigned long i; | |
1090 | u32 sectorsize = BTRFS_I(inode)->root->sectorsize; | |
e4100d98 | 1091 | int index = 0; |
ba1da2f4 CM |
1092 | |
1093 | ordered = btrfs_lookup_ordered_extent(inode, offset); | |
1094 | if (!ordered) | |
e4100d98 | 1095 | return 0; |
ba1da2f4 | 1096 | |
5fd02043 | 1097 | spin_lock_irq(&tree->lock); |
c6e30871 | 1098 | list_for_each_entry_reverse(ordered_sum, &ordered->list, list) { |
e4100d98 MX |
1099 | if (disk_bytenr >= ordered_sum->bytenr && |
1100 | disk_bytenr < ordered_sum->bytenr + ordered_sum->len) { | |
1101 | i = (disk_bytenr - ordered_sum->bytenr) >> | |
1102 | inode->i_sb->s_blocksize_bits; | |
e4100d98 MX |
1103 | num_sectors = ordered_sum->len >> |
1104 | inode->i_sb->s_blocksize_bits; | |
f51a4a18 MX |
1105 | num_sectors = min_t(int, len - index, num_sectors - i); |
1106 | memcpy(sum + index, ordered_sum->sums + i, | |
1107 | num_sectors); | |
1108 | ||
1109 | index += (int)num_sectors; | |
1110 | if (index == len) | |
1111 | goto out; | |
1112 | disk_bytenr += num_sectors * sectorsize; | |
ba1da2f4 CM |
1113 | } |
1114 | } | |
1115 | out: | |
5fd02043 | 1116 | spin_unlock_irq(&tree->lock); |
89642229 | 1117 | btrfs_put_ordered_extent(ordered); |
e4100d98 | 1118 | return index; |
ba1da2f4 CM |
1119 | } |
1120 | ||
6352b91d MX |
1121 | int __init ordered_data_init(void) |
1122 | { | |
1123 | btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent", | |
1124 | sizeof(struct btrfs_ordered_extent), 0, | |
fba4b697 | 1125 | SLAB_MEM_SPREAD, |
6352b91d MX |
1126 | NULL); |
1127 | if (!btrfs_ordered_extent_cache) | |
1128 | return -ENOMEM; | |
25287e0a | 1129 | |
6352b91d MX |
1130 | return 0; |
1131 | } | |
1132 | ||
1133 | void ordered_data_exit(void) | |
1134 | { | |
5598e900 | 1135 | kmem_cache_destroy(btrfs_ordered_extent_cache); |
6352b91d | 1136 | } |