btrfs: Avoid truncating page or punching hole in an already existing hole.
fs/btrfs/file.c
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17 */
18
19 #include <linux/fs.h>
20 #include <linux/pagemap.h>
21 #include <linux/highmem.h>
22 #include <linux/time.h>
23 #include <linux/init.h>
24 #include <linux/string.h>
25 #include <linux/backing-dev.h>
26 #include <linux/mpage.h>
27 #include <linux/aio.h>
28 #include <linux/falloc.h>
29 #include <linux/swap.h>
30 #include <linux/writeback.h>
31 #include <linux/statfs.h>
32 #include <linux/compat.h>
33 #include <linux/slab.h>
34 #include <linux/btrfs.h>
35 #include "ctree.h"
36 #include "disk-io.h"
37 #include "transaction.h"
38 #include "btrfs_inode.h"
39 #include "print-tree.h"
40 #include "tree-log.h"
41 #include "locking.h"
42 #include "volumes.h"
43 #include "qgroup.h"
44
45 static struct kmem_cache *btrfs_inode_defrag_cachep;
46 /*
47 * when auto defrag is enabled we
48 * queue up these defrag structs to remember which
49 * inodes need defragging passes
50 */
51 struct inode_defrag {
52 struct rb_node rb_node;
53 /* objectid */
54 u64 ino;
55 /*
56 * transid where the defrag was added, we search for
57 * extents newer than this
58 */
59 u64 transid;
60
61 /* root objectid */
62 u64 root;
63
64 /* last offset we were able to defrag */
65 u64 last_offset;
66
67 /* if we've wrapped around back to zero once already */
68 int cycled;
69 };
70
71 static int __compare_inode_defrag(struct inode_defrag *defrag1,
72 struct inode_defrag *defrag2)
73 {
74 if (defrag1->root > defrag2->root)
75 return 1;
76 else if (defrag1->root < defrag2->root)
77 return -1;
78 else if (defrag1->ino > defrag2->ino)
79 return 1;
80 else if (defrag1->ino < defrag2->ino)
81 return -1;
82 else
83 return 0;
84 }
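/*
 * Illustrative ordering (hypothetical values): the defrag tree is keyed
 * by (root, ino), compared in that order, so
 *   (root=5, ino=258) < (root=5, ino=300) < (root=7, ino=100)
 * regardless of how the inode numbers compare across different roots.
 */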
85
86 /* insert a record for an inode into the defrag tree. The lock
87 * must be held already
88 *
89 * If you're inserting a record for an older transid than an
90 * existing record, the transid already in the tree is lowered
91 *
92  * If an existing record is found, the defrag item you
93 * pass in is freed
94 */
95 static int __btrfs_add_inode_defrag(struct inode *inode,
96 struct inode_defrag *defrag)
97 {
98 struct btrfs_root *root = BTRFS_I(inode)->root;
99 struct inode_defrag *entry;
100 struct rb_node **p;
101 struct rb_node *parent = NULL;
102 int ret;
103
104 p = &root->fs_info->defrag_inodes.rb_node;
105 while (*p) {
106 parent = *p;
107 entry = rb_entry(parent, struct inode_defrag, rb_node);
108
109 ret = __compare_inode_defrag(defrag, entry);
110 if (ret < 0)
111 p = &parent->rb_left;
112 else if (ret > 0)
113 p = &parent->rb_right;
114 else {
115 /* if we're reinserting an entry for
116 * an old defrag run, make sure to
117 * lower the transid of our existing record
118 */
119 if (defrag->transid < entry->transid)
120 entry->transid = defrag->transid;
121 if (defrag->last_offset > entry->last_offset)
122 entry->last_offset = defrag->last_offset;
123 return -EEXIST;
124 }
125 }
126 set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
127 rb_link_node(&defrag->rb_node, parent, p);
128 rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
129 return 0;
130 }
131
132 static inline int __need_auto_defrag(struct btrfs_root *root)
133 {
134 if (!btrfs_test_opt(root, AUTO_DEFRAG))
135 return 0;
136
137 if (btrfs_fs_closing(root->fs_info))
138 return 0;
139
140 return 1;
141 }
142
143 /*
144 * insert a defrag record for this inode if auto defrag is
145 * enabled
146 */
147 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
148 struct inode *inode)
149 {
150 struct btrfs_root *root = BTRFS_I(inode)->root;
151 struct inode_defrag *defrag;
152 u64 transid;
153 int ret;
154
155 if (!__need_auto_defrag(root))
156 return 0;
157
158 if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
159 return 0;
160
161 if (trans)
162 transid = trans->transid;
163 else
164 transid = BTRFS_I(inode)->root->last_trans;
165
166 defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
167 if (!defrag)
168 return -ENOMEM;
169
170 defrag->ino = btrfs_ino(inode);
171 defrag->transid = transid;
172 defrag->root = root->root_key.objectid;
173
174 spin_lock(&root->fs_info->defrag_inodes_lock);
175 if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
176 /*
177 		 * If we set the IN_DEFRAG flag and then evict the inode from
178 		 * memory, the re-read inode won't have the IN_DEFRAG flag set.
179 		 * In that case, we may find an existing defrag record here.
180 */
181 ret = __btrfs_add_inode_defrag(inode, defrag);
182 if (ret)
183 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
184 } else {
185 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
186 }
187 spin_unlock(&root->fs_info->defrag_inodes_lock);
188 return 0;
189 }
190
191 /*
192 * Requeue the defrag object. If there is a defrag object that points to
193 * the same inode in the tree, we will merge them together (by
194 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
195 */
196 static void btrfs_requeue_inode_defrag(struct inode *inode,
197 struct inode_defrag *defrag)
198 {
199 struct btrfs_root *root = BTRFS_I(inode)->root;
200 int ret;
201
202 if (!__need_auto_defrag(root))
203 goto out;
204
205 /*
206 	 * Here we don't check the IN_DEFRAG flag, because we need to merge
207 * them together.
208 */
209 spin_lock(&root->fs_info->defrag_inodes_lock);
210 ret = __btrfs_add_inode_defrag(inode, defrag);
211 spin_unlock(&root->fs_info->defrag_inodes_lock);
212 if (ret)
213 goto out;
214 return;
215 out:
216 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
217 }
218
219 /*
220  * pick the defraggable inode that we want; if it doesn't exist, we will get
221 * the next one.
222 */
223 static struct inode_defrag *
224 btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
225 {
226 struct inode_defrag *entry = NULL;
227 struct inode_defrag tmp;
228 struct rb_node *p;
229 struct rb_node *parent = NULL;
230 int ret;
231
232 tmp.ino = ino;
233 tmp.root = root;
234
235 spin_lock(&fs_info->defrag_inodes_lock);
236 p = fs_info->defrag_inodes.rb_node;
237 while (p) {
238 parent = p;
239 entry = rb_entry(parent, struct inode_defrag, rb_node);
240
241 ret = __compare_inode_defrag(&tmp, entry);
242 if (ret < 0)
243 p = parent->rb_left;
244 else if (ret > 0)
245 p = parent->rb_right;
246 else
247 goto out;
248 }
249
250 if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
251 parent = rb_next(parent);
252 if (parent)
253 entry = rb_entry(parent, struct inode_defrag, rb_node);
254 else
255 entry = NULL;
256 }
257 out:
258 if (entry)
259 rb_erase(parent, &fs_info->defrag_inodes);
260 spin_unlock(&fs_info->defrag_inodes_lock);
261 return entry;
262 }
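/*
 * Example (hypothetical values): with entries for (root=5, ino=150) and
 * (root=5, ino=200) in the tree, asking for (root=5, ino=160) misses, so
 * the function hands back the next entry in key order, (5, 200), which is
 * erased from the tree and returned to the caller.
 */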
263
264 void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
265 {
266 struct inode_defrag *defrag;
267 struct rb_node *node;
268
269 spin_lock(&fs_info->defrag_inodes_lock);
270 node = rb_first(&fs_info->defrag_inodes);
271 while (node) {
272 rb_erase(node, &fs_info->defrag_inodes);
273 defrag = rb_entry(node, struct inode_defrag, rb_node);
274 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
275
276 if (need_resched()) {
277 spin_unlock(&fs_info->defrag_inodes_lock);
278 cond_resched();
279 spin_lock(&fs_info->defrag_inodes_lock);
280 }
281
282 node = rb_first(&fs_info->defrag_inodes);
283 }
284 spin_unlock(&fs_info->defrag_inodes_lock);
285 }
286
287 #define BTRFS_DEFRAG_BATCH 1024
288
289 static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
290 struct inode_defrag *defrag)
291 {
292 struct btrfs_root *inode_root;
293 struct inode *inode;
294 struct btrfs_key key;
295 struct btrfs_ioctl_defrag_range_args range;
296 int num_defrag;
297 int index;
298 int ret;
299
300 /* get the inode */
301 key.objectid = defrag->root;
302 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
303 key.offset = (u64)-1;
304
305 index = srcu_read_lock(&fs_info->subvol_srcu);
306
307 inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
308 if (IS_ERR(inode_root)) {
309 ret = PTR_ERR(inode_root);
310 goto cleanup;
311 }
312
313 key.objectid = defrag->ino;
314 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
315 key.offset = 0;
316 inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
317 if (IS_ERR(inode)) {
318 ret = PTR_ERR(inode);
319 goto cleanup;
320 }
321 srcu_read_unlock(&fs_info->subvol_srcu, index);
322
323 /* do a chunk of defrag */
324 clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
325 memset(&range, 0, sizeof(range));
326 range.len = (u64)-1;
327 range.start = defrag->last_offset;
328
329 sb_start_write(fs_info->sb);
330 num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
331 BTRFS_DEFRAG_BATCH);
332 sb_end_write(fs_info->sb);
333 /*
334 * if we filled the whole defrag batch, there
335 * must be more work to do. Queue this defrag
336 * again
337 */
338 if (num_defrag == BTRFS_DEFRAG_BATCH) {
339 defrag->last_offset = range.start;
340 btrfs_requeue_inode_defrag(inode, defrag);
341 } else if (defrag->last_offset && !defrag->cycled) {
342 /*
343 * we didn't fill our defrag batch, but
344 * we didn't start at zero. Make sure we loop
345 * around to the start of the file.
346 */
347 defrag->last_offset = 0;
348 defrag->cycled = 1;
349 btrfs_requeue_inode_defrag(inode, defrag);
350 } else {
351 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
352 }
353
354 iput(inode);
355 return 0;
356 cleanup:
357 srcu_read_unlock(&fs_info->subvol_srcu, index);
358 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
359 return ret;
360 }
361
362 /*
363 * run through the list of inodes in the FS that need
364 * defragging
365 */
366 int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
367 {
368 struct inode_defrag *defrag;
369 u64 first_ino = 0;
370 u64 root_objectid = 0;
371
372 atomic_inc(&fs_info->defrag_running);
373 while (1) {
374 /* Pause the auto defragger. */
375 if (test_bit(BTRFS_FS_STATE_REMOUNTING,
376 &fs_info->fs_state))
377 break;
378
379 if (!__need_auto_defrag(fs_info->tree_root))
380 break;
381
382 /* find an inode to defrag */
383 defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
384 first_ino);
385 if (!defrag) {
386 if (root_objectid || first_ino) {
387 root_objectid = 0;
388 first_ino = 0;
389 continue;
390 } else {
391 break;
392 }
393 }
394
395 first_ino = defrag->ino + 1;
396 root_objectid = defrag->root;
397
398 __btrfs_run_defrag_inode(fs_info, defrag);
399 }
400 atomic_dec(&fs_info->defrag_running);
401
402 /*
403 * during unmount, we use the transaction_wait queue to
404 * wait for the defragger to stop
405 */
406 wake_up(&fs_info->transaction_wait);
407 return 0;
408 }
409
410 /* simple helper to fault in pages and copy. This should go away
411 * and be replaced with calls into generic code.
412 */
413 static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
414 size_t write_bytes,
415 struct page **prepared_pages,
416 struct iov_iter *i)
417 {
418 size_t copied = 0;
419 size_t total_copied = 0;
420 int pg = 0;
421 int offset = pos & (PAGE_CACHE_SIZE - 1);
422
423 while (write_bytes > 0) {
424 size_t count = min_t(size_t,
425 PAGE_CACHE_SIZE - offset, write_bytes);
426 struct page *page = prepared_pages[pg];
427 /*
428 * Copy data from userspace to the current page
429 */
430 copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
431
432 /* Flush processor's dcache for this page */
433 flush_dcache_page(page);
434
435 /*
436 * if we get a partial write, we can end up with
437 * partially up to date pages. These add
438 * a lot of complexity, so make sure they don't
439 * happen by forcing this copy to be retried.
440 *
441 * The rest of the btrfs_file_write code will fall
442 * back to page at a time copies after we return 0.
443 */
444 if (!PageUptodate(page) && copied < count)
445 copied = 0;
446
447 iov_iter_advance(i, copied);
448 write_bytes -= copied;
449 total_copied += copied;
450
451 /* Return to btrfs_file_aio_write to fault page */
452 if (unlikely(copied == 0))
453 break;
454
455 if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
456 offset += copied;
457 } else {
458 pg++;
459 offset = 0;
460 }
461 }
462 return total_copied;
463 }
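/*
 * Worked example for the copy loop above (hypothetical values, assuming
 * PAGE_CACHE_SIZE == 4096): for pos = 5000 and write_bytes = 5000,
 * offset = 5000 & 4095 = 904, so the first iteration copies
 * min(4096 - 904, 5000) = 3192 bytes into page 0, and the second copies
 * the remaining 1808 bytes into page 1 at offset 0.
 */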
464
465 /*
466 * unlocks pages after btrfs_file_write is done with them
467 */
468 static void btrfs_drop_pages(struct page **pages, size_t num_pages)
469 {
470 size_t i;
471 for (i = 0; i < num_pages; i++) {
472 		/* PageChecked is some magic around finding pages that
473 		 * have been modified without going through btrfs_set_page_dirty.
474 		 * Clear it here.
475 */
476 ClearPageChecked(pages[i]);
477 unlock_page(pages[i]);
478 mark_page_accessed(pages[i]);
479 page_cache_release(pages[i]);
480 }
481 }
482
483 /*
484 * after copy_from_user, pages need to be dirtied and we need to make
485 * sure holes are created between the current EOF and the start of
486 * any next extents (if required).
487 *
488 * this also makes the decision about creating an inline extent vs
489 * doing real data extents, marking pages dirty and delalloc as required.
490 */
491 int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
492 struct page **pages, size_t num_pages,
493 loff_t pos, size_t write_bytes,
494 struct extent_state **cached)
495 {
496 int err = 0;
497 int i;
498 u64 num_bytes;
499 u64 start_pos;
500 u64 end_of_last_block;
501 u64 end_pos = pos + write_bytes;
502 loff_t isize = i_size_read(inode);
503
504 start_pos = pos & ~((u64)root->sectorsize - 1);
505 num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);
506
507 end_of_last_block = start_pos + num_bytes - 1;
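	/*
	 * Worked example (hypothetical values, assuming sectorsize == 4096):
	 * pos = 5000 and write_bytes = 3000 give start_pos = 4096 (rounded
	 * down to a sector boundary), num_bytes = ALIGN(3904, 4096) = 4096,
	 * and end_of_last_block = 8191, i.e. the delalloc range below covers
	 * the whole sector that the write touches.
	 */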
508 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
509 cached);
510 if (err)
511 return err;
512
513 for (i = 0; i < num_pages; i++) {
514 struct page *p = pages[i];
515 SetPageUptodate(p);
516 ClearPageChecked(p);
517 set_page_dirty(p);
518 }
519
520 /*
521 * we've only changed i_size in ram, and we haven't updated
522 * the disk i_size. There is no need to log the inode
523 * at this time.
524 */
525 if (end_pos > isize)
526 i_size_write(inode, end_pos);
527 return 0;
528 }
529
530 /*
531 * this drops all the extents in the cache that intersect the range
532 * [start, end]. Existing extents are split as required.
533 */
534 void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
535 int skip_pinned)
536 {
537 struct extent_map *em;
538 struct extent_map *split = NULL;
539 struct extent_map *split2 = NULL;
540 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
541 u64 len = end - start + 1;
542 u64 gen;
543 int ret;
544 int testend = 1;
545 unsigned long flags;
546 int compressed = 0;
547 bool modified;
548
549 WARN_ON(end < start);
550 if (end == (u64)-1) {
551 len = (u64)-1;
552 testend = 0;
553 }
554 while (1) {
555 int no_splits = 0;
556
557 modified = false;
558 if (!split)
559 split = alloc_extent_map();
560 if (!split2)
561 split2 = alloc_extent_map();
562 if (!split || !split2)
563 no_splits = 1;
564
565 write_lock(&em_tree->lock);
566 em = lookup_extent_mapping(em_tree, start, len);
567 if (!em) {
568 write_unlock(&em_tree->lock);
569 break;
570 }
571 flags = em->flags;
572 gen = em->generation;
573 if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
574 if (testend && em->start + em->len >= start + len) {
575 free_extent_map(em);
576 write_unlock(&em_tree->lock);
577 break;
578 }
579 start = em->start + em->len;
580 if (testend)
581 len = start + len - (em->start + em->len);
582 free_extent_map(em);
583 write_unlock(&em_tree->lock);
584 continue;
585 }
586 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
587 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
588 clear_bit(EXTENT_FLAG_LOGGING, &flags);
589 modified = !list_empty(&em->list);
590 if (no_splits)
591 goto next;
592
593 if (em->start < start) {
594 split->start = em->start;
595 split->len = start - em->start;
596
597 if (em->block_start < EXTENT_MAP_LAST_BYTE) {
598 split->orig_start = em->orig_start;
599 split->block_start = em->block_start;
600
601 if (compressed)
602 split->block_len = em->block_len;
603 else
604 split->block_len = split->len;
605 split->orig_block_len = max(split->block_len,
606 em->orig_block_len);
607 split->ram_bytes = em->ram_bytes;
608 } else {
609 split->orig_start = split->start;
610 split->block_len = 0;
611 split->block_start = em->block_start;
612 split->orig_block_len = 0;
613 split->ram_bytes = split->len;
614 }
615
616 split->generation = gen;
617 split->bdev = em->bdev;
618 split->flags = flags;
619 split->compress_type = em->compress_type;
620 replace_extent_mapping(em_tree, em, split, modified);
621 free_extent_map(split);
622 split = split2;
623 split2 = NULL;
624 }
625 if (testend && em->start + em->len > start + len) {
626 u64 diff = start + len - em->start;
627
628 split->start = start + len;
629 split->len = em->start + em->len - (start + len);
630 split->bdev = em->bdev;
631 split->flags = flags;
632 split->compress_type = em->compress_type;
633 split->generation = gen;
634
635 if (em->block_start < EXTENT_MAP_LAST_BYTE) {
636 split->orig_block_len = max(em->block_len,
637 em->orig_block_len);
638
639 split->ram_bytes = em->ram_bytes;
640 if (compressed) {
641 split->block_len = em->block_len;
642 split->block_start = em->block_start;
643 split->orig_start = em->orig_start;
644 } else {
645 split->block_len = split->len;
646 split->block_start = em->block_start
647 + diff;
648 split->orig_start = em->orig_start;
649 }
650 } else {
651 split->ram_bytes = split->len;
652 split->orig_start = split->start;
653 split->block_len = 0;
654 split->block_start = em->block_start;
655 split->orig_block_len = 0;
656 }
657
658 if (extent_map_in_tree(em)) {
659 replace_extent_mapping(em_tree, em, split,
660 modified);
661 } else {
662 ret = add_extent_mapping(em_tree, split,
663 modified);
664 ASSERT(ret == 0); /* Logic error */
665 }
666 free_extent_map(split);
667 split = NULL;
668 }
669 next:
670 if (extent_map_in_tree(em))
671 remove_extent_mapping(em_tree, em);
672 write_unlock(&em_tree->lock);
673
674 /* once for us */
675 free_extent_map(em);
676 		/* once for the tree */
677 free_extent_map(em);
678 }
679 if (split)
680 free_extent_map(split);
681 if (split2)
682 free_extent_map(split2);
683 }
684
685 /*
686  * this is very complex, but the basic idea is to drop all extents
687  * in the range start - end.
688  *
690 * If an extent intersects the range but is not entirely inside the range
691 * it is either truncated or split. Anything entirely inside the range
692 * is deleted from the tree.
693 */
694 int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
695 struct btrfs_root *root, struct inode *inode,
696 struct btrfs_path *path, u64 start, u64 end,
697 u64 *drop_end, int drop_cache,
698 int replace_extent,
699 u32 extent_item_size,
700 int *key_inserted)
701 {
702 struct extent_buffer *leaf;
703 struct btrfs_file_extent_item *fi;
704 struct btrfs_key key;
705 struct btrfs_key new_key;
706 u64 ino = btrfs_ino(inode);
707 u64 search_start = start;
708 u64 disk_bytenr = 0;
709 u64 num_bytes = 0;
710 u64 extent_offset = 0;
711 u64 extent_end = 0;
712 int del_nr = 0;
713 int del_slot = 0;
714 int extent_type;
715 int recow;
716 int ret;
717 int modify_tree = -1;
718 int update_refs;
719 int found = 0;
720 int leafs_visited = 0;
721
722 if (drop_cache)
723 btrfs_drop_extent_cache(inode, start, end - 1, 0);
724
725 if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
726 modify_tree = 0;
727
728 update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
729 root == root->fs_info->tree_root);
730 while (1) {
731 recow = 0;
732 ret = btrfs_lookup_file_extent(trans, root, path, ino,
733 search_start, modify_tree);
734 if (ret < 0)
735 break;
736 if (ret > 0 && path->slots[0] > 0 && search_start == start) {
737 leaf = path->nodes[0];
738 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
739 if (key.objectid == ino &&
740 key.type == BTRFS_EXTENT_DATA_KEY)
741 path->slots[0]--;
742 }
743 ret = 0;
744 leafs_visited++;
745 next_slot:
746 leaf = path->nodes[0];
747 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
748 BUG_ON(del_nr > 0);
749 ret = btrfs_next_leaf(root, path);
750 if (ret < 0)
751 break;
752 if (ret > 0) {
753 ret = 0;
754 break;
755 }
756 leafs_visited++;
757 leaf = path->nodes[0];
758 recow = 1;
759 }
760
761 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
762 if (key.objectid > ino ||
763 key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
764 break;
765
766 fi = btrfs_item_ptr(leaf, path->slots[0],
767 struct btrfs_file_extent_item);
768 extent_type = btrfs_file_extent_type(leaf, fi);
769
770 if (extent_type == BTRFS_FILE_EXTENT_REG ||
771 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
772 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
773 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
774 extent_offset = btrfs_file_extent_offset(leaf, fi);
775 extent_end = key.offset +
776 btrfs_file_extent_num_bytes(leaf, fi);
777 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
778 extent_end = key.offset +
779 btrfs_file_extent_inline_len(leaf,
780 path->slots[0], fi);
781 } else {
782 WARN_ON(1);
783 extent_end = search_start;
784 }
785
786 /*
787 * Don't skip extent items representing 0 byte lengths. They
788 		 * used to be created (due to a bug) when we hit an -ENOSPC
789 		 * condition while punching holes. So if we find one here, just
790 		 * ensure we delete it, otherwise we would insert a new file
791 		 * extent item with the same key (offset) as that zero-length file
792 * extent item in the call to setup_items_for_insert() later
793 * in this function.
794 */
795 if (extent_end == key.offset && extent_end >= search_start)
796 goto delete_extent_item;
797
798 if (extent_end <= search_start) {
799 path->slots[0]++;
800 goto next_slot;
801 }
802
803 found = 1;
804 search_start = max(key.offset, start);
805 if (recow || !modify_tree) {
806 modify_tree = -1;
807 btrfs_release_path(path);
808 continue;
809 }
810
811 /*
812 * | - range to drop - |
813 * | -------- extent -------- |
814 */
815 if (start > key.offset && end < extent_end) {
816 BUG_ON(del_nr > 0);
817 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
818 ret = -EOPNOTSUPP;
819 break;
820 }
821
822 memcpy(&new_key, &key, sizeof(new_key));
823 new_key.offset = start;
824 ret = btrfs_duplicate_item(trans, root, path,
825 &new_key);
826 if (ret == -EAGAIN) {
827 btrfs_release_path(path);
828 continue;
829 }
830 if (ret < 0)
831 break;
832
833 leaf = path->nodes[0];
834 fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
835 struct btrfs_file_extent_item);
836 btrfs_set_file_extent_num_bytes(leaf, fi,
837 start - key.offset);
838
839 fi = btrfs_item_ptr(leaf, path->slots[0],
840 struct btrfs_file_extent_item);
841
842 extent_offset += start - key.offset;
843 btrfs_set_file_extent_offset(leaf, fi, extent_offset);
844 btrfs_set_file_extent_num_bytes(leaf, fi,
845 extent_end - start);
846 btrfs_mark_buffer_dirty(leaf);
847
848 if (update_refs && disk_bytenr > 0) {
849 ret = btrfs_inc_extent_ref(trans, root,
850 disk_bytenr, num_bytes, 0,
851 root->root_key.objectid,
852 new_key.objectid,
853 start - extent_offset, 1);
854 BUG_ON(ret); /* -ENOMEM */
855 }
856 key.offset = start;
857 }
858 /*
859 * | ---- range to drop ----- |
860 * | -------- extent -------- |
861 */
862 if (start <= key.offset && end < extent_end) {
863 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
864 ret = -EOPNOTSUPP;
865 break;
866 }
867
868 memcpy(&new_key, &key, sizeof(new_key));
869 new_key.offset = end;
870 btrfs_set_item_key_safe(root, path, &new_key);
871
872 extent_offset += end - key.offset;
873 btrfs_set_file_extent_offset(leaf, fi, extent_offset);
874 btrfs_set_file_extent_num_bytes(leaf, fi,
875 extent_end - end);
876 btrfs_mark_buffer_dirty(leaf);
877 if (update_refs && disk_bytenr > 0)
878 inode_sub_bytes(inode, end - key.offset);
879 break;
880 }
881
882 search_start = extent_end;
883 /*
884 * | ---- range to drop ----- |
885 * | -------- extent -------- |
886 */
887 if (start > key.offset && end >= extent_end) {
888 BUG_ON(del_nr > 0);
889 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
890 ret = -EOPNOTSUPP;
891 break;
892 }
893
894 btrfs_set_file_extent_num_bytes(leaf, fi,
895 start - key.offset);
896 btrfs_mark_buffer_dirty(leaf);
897 if (update_refs && disk_bytenr > 0)
898 inode_sub_bytes(inode, extent_end - start);
899 if (end == extent_end)
900 break;
901
902 path->slots[0]++;
903 goto next_slot;
904 }
905
906 /*
907 * | ---- range to drop ----- |
908 * | ------ extent ------ |
909 */
910 if (start <= key.offset && end >= extent_end) {
911 delete_extent_item:
912 if (del_nr == 0) {
913 del_slot = path->slots[0];
914 del_nr = 1;
915 } else {
916 BUG_ON(del_slot + del_nr != path->slots[0]);
917 del_nr++;
918 }
919
920 if (update_refs &&
921 extent_type == BTRFS_FILE_EXTENT_INLINE) {
922 inode_sub_bytes(inode,
923 extent_end - key.offset);
924 extent_end = ALIGN(extent_end,
925 root->sectorsize);
926 } else if (update_refs && disk_bytenr > 0) {
927 ret = btrfs_free_extent(trans, root,
928 disk_bytenr, num_bytes, 0,
929 root->root_key.objectid,
930 key.objectid, key.offset -
931 extent_offset, 0);
932 BUG_ON(ret); /* -ENOMEM */
933 inode_sub_bytes(inode,
934 extent_end - key.offset);
935 }
936
937 if (end == extent_end)
938 break;
939
940 if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
941 path->slots[0]++;
942 goto next_slot;
943 }
944
945 ret = btrfs_del_items(trans, root, path, del_slot,
946 del_nr);
947 if (ret) {
948 btrfs_abort_transaction(trans, root, ret);
949 break;
950 }
951
952 del_nr = 0;
953 del_slot = 0;
954
955 btrfs_release_path(path);
956 continue;
957 }
958
959 BUG_ON(1);
960 }
961
962 if (!ret && del_nr > 0) {
963 /*
964 * Set path->slots[0] to first slot, so that after the delete
965 		 * if items are moved off from our leaf to its immediate left or
966 		 * right neighbor leaves, we end up with a correct and adjusted
967 * path->slots[0] for our insertion (if replace_extent != 0).
968 */
969 path->slots[0] = del_slot;
970 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
971 if (ret)
972 btrfs_abort_transaction(trans, root, ret);
973 }
974
975 leaf = path->nodes[0];
976 /*
977 * If btrfs_del_items() was called, it might have deleted a leaf, in
978 * which case it unlocked our path, so check path->locks[0] matches a
979 * write lock.
980 */
981 if (!ret && replace_extent && leafs_visited == 1 &&
982 (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
983 path->locks[0] == BTRFS_WRITE_LOCK) &&
984 btrfs_leaf_free_space(root, leaf) >=
985 sizeof(struct btrfs_item) + extent_item_size) {
986
987 key.objectid = ino;
988 key.type = BTRFS_EXTENT_DATA_KEY;
989 key.offset = start;
990 if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
991 struct btrfs_key slot_key;
992
993 btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
994 if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
995 path->slots[0]++;
996 }
997 setup_items_for_insert(root, path, &key,
998 &extent_item_size,
999 extent_item_size,
1000 sizeof(struct btrfs_item) +
1001 extent_item_size, 1);
1002 *key_inserted = 1;
1003 }
1004
1005 if (!replace_extent || !(*key_inserted))
1006 btrfs_release_path(path);
1007 if (drop_end)
1008 *drop_end = found ? min(end, extent_end) : end;
1009 return ret;
1010 }
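/*
 * Worked example of the overlap cases above (hypothetical values):
 * dropping the range [4096, 12288) against a file extent item at
 * key.offset = 0 with extent_end = 16384 hits the "range to drop is in
 * the middle of the extent" case first: the item is duplicated, the
 * front piece is trimmed to cover [0, 4096), an extra extent ref is
 * taken for the duplicate, and the back piece ends up re-keyed to cover
 * [12288, 16384).
 */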
1011
1012 int btrfs_drop_extents(struct btrfs_trans_handle *trans,
1013 struct btrfs_root *root, struct inode *inode, u64 start,
1014 u64 end, int drop_cache)
1015 {
1016 struct btrfs_path *path;
1017 int ret;
1018
1019 path = btrfs_alloc_path();
1020 if (!path)
1021 return -ENOMEM;
1022 ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
1023 drop_cache, 0, 0, NULL);
1024 btrfs_free_path(path);
1025 return ret;
1026 }
1027
1028 static int extent_mergeable(struct extent_buffer *leaf, int slot,
1029 u64 objectid, u64 bytenr, u64 orig_offset,
1030 u64 *start, u64 *end)
1031 {
1032 struct btrfs_file_extent_item *fi;
1033 struct btrfs_key key;
1034 u64 extent_end;
1035
1036 if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1037 return 0;
1038
1039 btrfs_item_key_to_cpu(leaf, &key, slot);
1040 if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
1041 return 0;
1042
1043 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1044 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
1045 btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
1046 btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
1047 btrfs_file_extent_compression(leaf, fi) ||
1048 btrfs_file_extent_encryption(leaf, fi) ||
1049 btrfs_file_extent_other_encoding(leaf, fi))
1050 return 0;
1051
1052 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1053 if ((*start && *start != key.offset) || (*end && *end != extent_end))
1054 return 0;
1055
1056 *start = key.offset;
1057 *end = extent_end;
1058 return 1;
1059 }
1060
1061 /*
1062 * Mark extent in the range start - end as written.
1063 *
1064 * This changes extent type from 'pre-allocated' to 'regular'. If only
1065  * part of the extent is marked as written, the extent will be split into
1066 * two or three.
1067 */
1068 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
1069 struct inode *inode, u64 start, u64 end)
1070 {
1071 struct btrfs_root *root = BTRFS_I(inode)->root;
1072 struct extent_buffer *leaf;
1073 struct btrfs_path *path;
1074 struct btrfs_file_extent_item *fi;
1075 struct btrfs_key key;
1076 struct btrfs_key new_key;
1077 u64 bytenr;
1078 u64 num_bytes;
1079 u64 extent_end;
1080 u64 orig_offset;
1081 u64 other_start;
1082 u64 other_end;
1083 u64 split;
1084 int del_nr = 0;
1085 int del_slot = 0;
1086 int recow;
1087 int ret;
1088 u64 ino = btrfs_ino(inode);
1089
1090 path = btrfs_alloc_path();
1091 if (!path)
1092 return -ENOMEM;
1093 again:
1094 recow = 0;
1095 split = start;
1096 key.objectid = ino;
1097 key.type = BTRFS_EXTENT_DATA_KEY;
1098 key.offset = split;
1099
1100 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1101 if (ret < 0)
1102 goto out;
1103 if (ret > 0 && path->slots[0] > 0)
1104 path->slots[0]--;
1105
1106 leaf = path->nodes[0];
1107 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1108 BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
1109 fi = btrfs_item_ptr(leaf, path->slots[0],
1110 struct btrfs_file_extent_item);
1111 BUG_ON(btrfs_file_extent_type(leaf, fi) !=
1112 BTRFS_FILE_EXTENT_PREALLOC);
1113 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1114 BUG_ON(key.offset > start || extent_end < end);
1115
1116 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1117 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1118 orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
1119 memcpy(&new_key, &key, sizeof(new_key));
1120
1121 if (start == key.offset && end < extent_end) {
1122 other_start = 0;
1123 other_end = start;
1124 if (extent_mergeable(leaf, path->slots[0] - 1,
1125 ino, bytenr, orig_offset,
1126 &other_start, &other_end)) {
1127 new_key.offset = end;
1128 btrfs_set_item_key_safe(root, path, &new_key);
1129 fi = btrfs_item_ptr(leaf, path->slots[0],
1130 struct btrfs_file_extent_item);
1131 btrfs_set_file_extent_generation(leaf, fi,
1132 trans->transid);
1133 btrfs_set_file_extent_num_bytes(leaf, fi,
1134 extent_end - end);
1135 btrfs_set_file_extent_offset(leaf, fi,
1136 end - orig_offset);
1137 fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1138 struct btrfs_file_extent_item);
1139 btrfs_set_file_extent_generation(leaf, fi,
1140 trans->transid);
1141 btrfs_set_file_extent_num_bytes(leaf, fi,
1142 end - other_start);
1143 btrfs_mark_buffer_dirty(leaf);
1144 goto out;
1145 }
1146 }
1147
1148 if (start > key.offset && end == extent_end) {
1149 other_start = end;
1150 other_end = 0;
1151 if (extent_mergeable(leaf, path->slots[0] + 1,
1152 ino, bytenr, orig_offset,
1153 &other_start, &other_end)) {
1154 fi = btrfs_item_ptr(leaf, path->slots[0],
1155 struct btrfs_file_extent_item);
1156 btrfs_set_file_extent_num_bytes(leaf, fi,
1157 start - key.offset);
1158 btrfs_set_file_extent_generation(leaf, fi,
1159 trans->transid);
1160 path->slots[0]++;
1161 new_key.offset = start;
1162 btrfs_set_item_key_safe(root, path, &new_key);
1163
1164 fi = btrfs_item_ptr(leaf, path->slots[0],
1165 struct btrfs_file_extent_item);
1166 btrfs_set_file_extent_generation(leaf, fi,
1167 trans->transid);
1168 btrfs_set_file_extent_num_bytes(leaf, fi,
1169 other_end - start);
1170 btrfs_set_file_extent_offset(leaf, fi,
1171 start - orig_offset);
1172 btrfs_mark_buffer_dirty(leaf);
1173 goto out;
1174 }
1175 }
1176
1177 while (start > key.offset || end < extent_end) {
1178 if (key.offset == start)
1179 split = end;
1180
1181 new_key.offset = split;
1182 ret = btrfs_duplicate_item(trans, root, path, &new_key);
1183 if (ret == -EAGAIN) {
1184 btrfs_release_path(path);
1185 goto again;
1186 }
1187 if (ret < 0) {
1188 btrfs_abort_transaction(trans, root, ret);
1189 goto out;
1190 }
1191
1192 leaf = path->nodes[0];
1193 fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1194 struct btrfs_file_extent_item);
1195 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1196 btrfs_set_file_extent_num_bytes(leaf, fi,
1197 split - key.offset);
1198
1199 fi = btrfs_item_ptr(leaf, path->slots[0],
1200 struct btrfs_file_extent_item);
1201
1202 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1203 btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
1204 btrfs_set_file_extent_num_bytes(leaf, fi,
1205 extent_end - split);
1206 btrfs_mark_buffer_dirty(leaf);
1207
1208 ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
1209 root->root_key.objectid,
1210 ino, orig_offset, 1);
1211 BUG_ON(ret); /* -ENOMEM */
1212
1213 if (split == start) {
1214 key.offset = start;
1215 } else {
1216 BUG_ON(start != key.offset);
1217 path->slots[0]--;
1218 extent_end = end;
1219 }
1220 recow = 1;
1221 }
1222
1223 other_start = end;
1224 other_end = 0;
1225 if (extent_mergeable(leaf, path->slots[0] + 1,
1226 ino, bytenr, orig_offset,
1227 &other_start, &other_end)) {
1228 if (recow) {
1229 btrfs_release_path(path);
1230 goto again;
1231 }
1232 extent_end = other_end;
1233 del_slot = path->slots[0] + 1;
1234 del_nr++;
1235 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1236 0, root->root_key.objectid,
1237 ino, orig_offset, 0);
1238 BUG_ON(ret); /* -ENOMEM */
1239 }
1240 other_start = 0;
1241 other_end = start;
1242 if (extent_mergeable(leaf, path->slots[0] - 1,
1243 ino, bytenr, orig_offset,
1244 &other_start, &other_end)) {
1245 if (recow) {
1246 btrfs_release_path(path);
1247 goto again;
1248 }
1249 key.offset = other_start;
1250 del_slot = path->slots[0];
1251 del_nr++;
1252 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1253 0, root->root_key.objectid,
1254 ino, orig_offset, 0);
1255 BUG_ON(ret); /* -ENOMEM */
1256 }
1257 if (del_nr == 0) {
1258 fi = btrfs_item_ptr(leaf, path->slots[0],
1259 struct btrfs_file_extent_item);
1260 btrfs_set_file_extent_type(leaf, fi,
1261 BTRFS_FILE_EXTENT_REG);
1262 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1263 btrfs_mark_buffer_dirty(leaf);
1264 } else {
1265 fi = btrfs_item_ptr(leaf, del_slot - 1,
1266 struct btrfs_file_extent_item);
1267 btrfs_set_file_extent_type(leaf, fi,
1268 BTRFS_FILE_EXTENT_REG);
1269 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1270 btrfs_set_file_extent_num_bytes(leaf, fi,
1271 extent_end - key.offset);
1272 btrfs_mark_buffer_dirty(leaf);
1273
1274 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1275 if (ret < 0) {
1276 btrfs_abort_transaction(trans, root, ret);
1277 goto out;
1278 }
1279 }
1280 out:
1281 btrfs_free_path(path);
1282 return 0;
1283 }
1284
1285 /*
1286  * on error we return an unlocked page and the error value;
1287  * on success we return a locked page and 0
1288 */
1289 static int prepare_uptodate_page(struct page *page, u64 pos,
1290 bool force_uptodate)
1291 {
1292 int ret = 0;
1293
1294 if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
1295 !PageUptodate(page)) {
1296 ret = btrfs_readpage(NULL, page);
1297 if (ret)
1298 return ret;
1299 lock_page(page);
1300 if (!PageUptodate(page)) {
1301 unlock_page(page);
1302 return -EIO;
1303 }
1304 }
1305 return 0;
1306 }
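/*
 * Example (hypothetical values, assuming PAGE_CACHE_SIZE == 4096): a
 * write at pos = 8192 that covers a whole page needs no read-in, so the
 * function returns 0 immediately; a write at pos = 8300 only partially
 * covers the page, so btrfs_readpage() is used to bring the rest of the
 * page up to date first.
 */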
1307
1308 /*
1309 * this just gets pages into the page cache and locks them down.
1310 */
1311 static noinline int prepare_pages(struct inode *inode, struct page **pages,
1312 size_t num_pages, loff_t pos,
1313 size_t write_bytes, bool force_uptodate)
1314 {
1315 int i;
1316 unsigned long index = pos >> PAGE_CACHE_SHIFT;
1317 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1318 int err = 0;
1319 int faili;
1320
1321 for (i = 0; i < num_pages; i++) {
1322 pages[i] = find_or_create_page(inode->i_mapping, index + i,
1323 mask | __GFP_WRITE);
1324 if (!pages[i]) {
1325 faili = i - 1;
1326 err = -ENOMEM;
1327 goto fail;
1328 }
1329
1330 if (i == 0)
1331 err = prepare_uptodate_page(pages[i], pos,
1332 force_uptodate);
1333 if (i == num_pages - 1)
1334 err = prepare_uptodate_page(pages[i],
1335 pos + write_bytes, false);
1336 if (err) {
1337 page_cache_release(pages[i]);
1338 faili = i - 1;
1339 goto fail;
1340 }
1341 wait_on_page_writeback(pages[i]);
1342 }
1343
1344 return 0;
1345 fail:
1346 while (faili >= 0) {
1347 unlock_page(pages[faili]);
1348 page_cache_release(pages[faili]);
1349 faili--;
1350 }
1351 return err;
1352
1353 }
1354
1355 /*
1356 * This function locks the extent and properly waits for data=ordered extents
1357  * to finish before allowing the pages to be modified if needed.
1358 *
1359 * The return value:
1360 * 1 - the extent is locked
1361 * 0 - the extent is not locked, and everything is OK
1362  * -EAGAIN - need to re-prepare the pages
1363  * any other value < 0 - something went wrong
1364 */
1365 static noinline int
1366 lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
1367 size_t num_pages, loff_t pos,
1368 u64 *lockstart, u64 *lockend,
1369 struct extent_state **cached_state)
1370 {
1371 u64 start_pos;
1372 u64 last_pos;
1373 int i;
1374 int ret = 0;
1375
1376 start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
1377 last_pos = start_pos + ((u64)num_pages << PAGE_CACHE_SHIFT) - 1;
1378
1379 if (start_pos < inode->i_size) {
1380 struct btrfs_ordered_extent *ordered;
1381 lock_extent_bits(&BTRFS_I(inode)->io_tree,
1382 start_pos, last_pos, 0, cached_state);
1383 ordered = btrfs_lookup_ordered_range(inode, start_pos,
1384 last_pos - start_pos + 1);
1385 if (ordered &&
1386 ordered->file_offset + ordered->len > start_pos &&
1387 ordered->file_offset <= last_pos) {
1388 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1389 start_pos, last_pos,
1390 cached_state, GFP_NOFS);
1391 for (i = 0; i < num_pages; i++) {
1392 unlock_page(pages[i]);
1393 page_cache_release(pages[i]);
1394 }
1395 btrfs_start_ordered_extent(inode, ordered, 1);
1396 btrfs_put_ordered_extent(ordered);
1397 return -EAGAIN;
1398 }
1399 if (ordered)
1400 btrfs_put_ordered_extent(ordered);
1401
1402 clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
1403 last_pos, EXTENT_DIRTY | EXTENT_DELALLOC |
1404 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
1405 0, 0, cached_state, GFP_NOFS);
1406 *lockstart = start_pos;
1407 *lockend = last_pos;
1408 ret = 1;
1409 }
1410
1411 for (i = 0; i < num_pages; i++) {
1412 if (clear_page_dirty_for_io(pages[i]))
1413 account_page_redirty(pages[i]);
1414 set_page_extent_mapped(pages[i]);
1415 WARN_ON(!PageLocked(pages[i]));
1416 }
1417
1418 return ret;
1419 }
1420
1421 static noinline int check_can_nocow(struct inode *inode, loff_t pos,
1422 size_t *write_bytes)
1423 {
1424 struct btrfs_root *root = BTRFS_I(inode)->root;
1425 struct btrfs_ordered_extent *ordered;
1426 u64 lockstart, lockend;
1427 u64 num_bytes;
1428 int ret;
1429
1430 ret = btrfs_start_nocow_write(root);
1431 if (!ret)
1432 return -ENOSPC;
1433
1434 lockstart = round_down(pos, root->sectorsize);
1435 lockend = round_up(pos + *write_bytes, root->sectorsize) - 1;
1436
1437 while (1) {
1438 lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
1439 ordered = btrfs_lookup_ordered_range(inode, lockstart,
1440 lockend - lockstart + 1);
1441 if (!ordered) {
1442 break;
1443 }
1444 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
1445 btrfs_start_ordered_extent(inode, ordered, 1);
1446 btrfs_put_ordered_extent(ordered);
1447 }
1448
1449 num_bytes = lockend - lockstart + 1;
1450 ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL);
1451 if (ret <= 0) {
1452 ret = 0;
1453 btrfs_end_nocow_write(root);
1454 } else {
1455 		*write_bytes = min_t(size_t, *write_bytes,
1456 num_bytes - pos + lockstart);
1457 }
1458
1459 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
1460
1461 return ret;
1462 }
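/*
 * Worked example of the rounding above (hypothetical values, assuming
 * sectorsize == 4096): pos = 5000 and *write_bytes = 100 give
 * lockstart = round_down(5000, 4096) = 4096 and
 * lockend = round_up(5100, 4096) - 1 = 8191, so a single sector is
 * checked; if can_nocow_extent() trims num_bytes to 4096, *write_bytes
 * stays min(100, 4096 - 5000 + 4096) = 100.
 */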
1463
1464 static noinline ssize_t __btrfs_buffered_write(struct file *file,
1465 struct iov_iter *i,
1466 loff_t pos)
1467 {
1468 struct inode *inode = file_inode(file);
1469 struct btrfs_root *root = BTRFS_I(inode)->root;
1470 struct page **pages = NULL;
1471 struct extent_state *cached_state = NULL;
1472 u64 release_bytes = 0;
1473 u64 lockstart;
1474 u64 lockend;
1475 unsigned long first_index;
1476 size_t num_written = 0;
1477 int nrptrs;
1478 int ret = 0;
1479 bool only_release_metadata = false;
1480 bool force_page_uptodate = false;
1481 bool need_unlock;
1482
1483 nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
1484 PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
1485 (sizeof(struct page *)));
1486 nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1487 nrptrs = max(nrptrs, 8);
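	/*
	 * Sizing example (hypothetical, assuming 4K pages and 8-byte
	 * pointers): PAGE_CACHE_SIZE / sizeof(struct page *) caps nrptrs at
	 * 512, a small one-page write yields nrptrs = 1, and the max() above
	 * then raises it to the floor of 8 page pointers.
	 */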
1488 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
1489 if (!pages)
1490 return -ENOMEM;
1491
1492 first_index = pos >> PAGE_CACHE_SHIFT;
1493
1494 while (iov_iter_count(i) > 0) {
1495 size_t offset = pos & (PAGE_CACHE_SIZE - 1);
1496 size_t write_bytes = min(iov_iter_count(i),
1497 nrptrs * (size_t)PAGE_CACHE_SIZE -
1498 offset);
1499 size_t num_pages = (write_bytes + offset +
1500 PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1501 size_t reserve_bytes;
1502 size_t dirty_pages;
1503 size_t copied;
1504
1505 WARN_ON(num_pages > nrptrs);
1506
1507 /*
1508 * Fault pages before locking them in prepare_pages
1509 		 * to avoid a recursive lock
1510 */
1511 if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
1512 ret = -EFAULT;
1513 break;
1514 }
1515
1516 reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
1517 ret = btrfs_check_data_free_space(inode, reserve_bytes);
1518 if (ret == -ENOSPC &&
1519 (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1520 BTRFS_INODE_PREALLOC))) {
1521 ret = check_can_nocow(inode, pos, &write_bytes);
1522 if (ret > 0) {
1523 only_release_metadata = true;
1524 /*
1525 * our prealloc extent may be smaller than
1526 * write_bytes, so scale down.
1527 */
1528 num_pages = (write_bytes + offset +
1529 PAGE_CACHE_SIZE - 1) >>
1530 PAGE_CACHE_SHIFT;
1531 reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
1532 ret = 0;
1533 } else {
1534 ret = -ENOSPC;
1535 }
1536 }
1537
1538 if (ret)
1539 break;
1540
1541 ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
1542 if (ret) {
1543 if (!only_release_metadata)
1544 btrfs_free_reserved_data_space(inode,
1545 reserve_bytes);
1546 else
1547 btrfs_end_nocow_write(root);
1548 break;
1549 }
1550
1551 release_bytes = reserve_bytes;
1552 need_unlock = false;
1553 again:
1554 /*
1555 		 * This is going to set up the pages array with the number of
1556 * pages we want, so we don't really need to worry about the
1557 * contents of pages from loop to loop
1558 */
1559 ret = prepare_pages(inode, pages, num_pages,
1560 pos, write_bytes,
1561 force_page_uptodate);
1562 if (ret)
1563 break;
1564
1565 ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
1566 pos, &lockstart, &lockend,
1567 &cached_state);
1568 if (ret < 0) {
1569 if (ret == -EAGAIN)
1570 goto again;
1571 break;
1572 } else if (ret > 0) {
1573 need_unlock = true;
1574 ret = 0;
1575 }
1576
1577 copied = btrfs_copy_from_user(pos, num_pages,
1578 write_bytes, pages, i);
1579
1580 /*
1581 * if we have trouble faulting in the pages, fall
1582 * back to one page at a time
1583 */
1584 if (copied < write_bytes)
1585 nrptrs = 1;
1586
1587 if (copied == 0) {
1588 force_page_uptodate = true;
1589 dirty_pages = 0;
1590 } else {
1591 force_page_uptodate = false;
1592 dirty_pages = (copied + offset +
1593 PAGE_CACHE_SIZE - 1) >>
1594 PAGE_CACHE_SHIFT;
1595 }
1596
1597 /*
1598 		 * If we had a short copy we need to release the excess delalloc
1599 * bytes we reserved. We need to increment outstanding_extents
1600 * because btrfs_delalloc_release_space will decrement it, but
1601 * we still have an outstanding extent for the chunk we actually
1602 * managed to copy.
1603 */
1604 if (num_pages > dirty_pages) {
1605 release_bytes = (num_pages - dirty_pages) <<
1606 PAGE_CACHE_SHIFT;
1607 if (copied > 0) {
1608 spin_lock(&BTRFS_I(inode)->lock);
1609 BTRFS_I(inode)->outstanding_extents++;
1610 spin_unlock(&BTRFS_I(inode)->lock);
1611 }
1612 if (only_release_metadata)
1613 btrfs_delalloc_release_metadata(inode,
1614 release_bytes);
1615 else
1616 btrfs_delalloc_release_space(inode,
1617 release_bytes);
1618 }
1619
1620 release_bytes = dirty_pages << PAGE_CACHE_SHIFT;
1621
1622 if (copied > 0)
1623 ret = btrfs_dirty_pages(root, inode, pages,
1624 dirty_pages, pos, copied,
1625 NULL);
1626 if (need_unlock)
1627 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1628 lockstart, lockend, &cached_state,
1629 GFP_NOFS);
1630 if (ret) {
1631 btrfs_drop_pages(pages, num_pages);
1632 break;
1633 }
1634
1635 release_bytes = 0;
1636 if (only_release_metadata)
1637 btrfs_end_nocow_write(root);
1638
1639 if (only_release_metadata && copied > 0) {
1640 u64 lockstart = round_down(pos, root->sectorsize);
1641 u64 lockend = lockstart +
1642 (dirty_pages << PAGE_CACHE_SHIFT) - 1;
1643
1644 set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
1645 lockend, EXTENT_NORESERVE, NULL,
1646 NULL, GFP_NOFS);
1647 only_release_metadata = false;
1648 }
1649
1650 btrfs_drop_pages(pages, num_pages);
1651
1652 cond_resched();
1653
1654 balance_dirty_pages_ratelimited(inode->i_mapping);
1655 if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
1656 btrfs_btree_balance_dirty(root);
1657
1658 pos += copied;
1659 num_written += copied;
1660 }
1661
1662 kfree(pages);
1663
1664 if (release_bytes) {
1665 if (only_release_metadata) {
1666 btrfs_end_nocow_write(root);
1667 btrfs_delalloc_release_metadata(inode, release_bytes);
1668 } else {
1669 btrfs_delalloc_release_space(inode, release_bytes);
1670 }
1671 }
1672
1673 return num_written ? num_written : ret;
1674 }
1675
1676 static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1677 const struct iovec *iov,
1678 unsigned long nr_segs, loff_t pos,
1679 size_t count, size_t ocount)
1680 {
1681 struct file *file = iocb->ki_filp;
1682 struct iov_iter i;
1683 ssize_t written;
1684 ssize_t written_buffered;
1685 loff_t endbyte;
1686 int err;
1687
1688 written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
1689 count, ocount);
1690
1691 if (written < 0 || written == count)
1692 return written;
1693
1694 pos += written;
1695 count -= written;
1696 iov_iter_init(&i, iov, nr_segs, count, written);
1697 written_buffered = __btrfs_buffered_write(file, &i, pos);
1698 if (written_buffered < 0) {
1699 err = written_buffered;
1700 goto out;
1701 }
1702 endbyte = pos + written_buffered - 1;
1703 err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
1704 if (err)
1705 goto out;
1706 written += written_buffered;
1707 iocb->ki_pos = pos + written_buffered;
1708 invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
1709 endbyte >> PAGE_CACHE_SHIFT);
1710 out:
1711 return written ? written : err;
1712 }
1713
1714 static void update_time_for_write(struct inode *inode)
1715 {
1716 struct timespec now;
1717
1718 if (IS_NOCMTIME(inode))
1719 return;
1720
1721 now = current_fs_time(inode->i_sb);
1722 if (!timespec_equal(&inode->i_mtime, &now))
1723 inode->i_mtime = now;
1724
1725 if (!timespec_equal(&inode->i_ctime, &now))
1726 inode->i_ctime = now;
1727
1728 if (IS_I_VERSION(inode))
1729 inode_inc_iversion(inode);
1730 }
1731
1732 static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1733 const struct iovec *iov,
1734 unsigned long nr_segs, loff_t pos)
1735 {
1736 struct file *file = iocb->ki_filp;
1737 struct inode *inode = file_inode(file);
1738 struct btrfs_root *root = BTRFS_I(inode)->root;
1739 u64 start_pos;
1740 u64 end_pos;
1741 ssize_t num_written = 0;
1742 ssize_t err = 0;
1743 size_t count, ocount;
1744 bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
1745
1746 mutex_lock(&inode->i_mutex);
1747
1748 err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
1749 if (err) {
1750 mutex_unlock(&inode->i_mutex);
1751 goto out;
1752 }
1753 count = ocount;
1754
1755 current->backing_dev_info = inode->i_mapping->backing_dev_info;
1756 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
1757 if (err) {
1758 mutex_unlock(&inode->i_mutex);
1759 goto out;
1760 }
1761
1762 if (count == 0) {
1763 mutex_unlock(&inode->i_mutex);
1764 goto out;
1765 }
1766
1767 err = file_remove_suid(file);
1768 if (err) {
1769 mutex_unlock(&inode->i_mutex);
1770 goto out;
1771 }
1772
1773 /*
1774 	 * If BTRFS flips to read-only due to some impossible error
1775 	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
1776 	 * then even though we have opened a file as writable, we have
1777 	 * to stop this write operation to ensure FS consistency.
1778 */
1779 if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
1780 mutex_unlock(&inode->i_mutex);
1781 err = -EROFS;
1782 goto out;
1783 }
1784
1785 /*
1786 * We reserve space for updating the inode when we reserve space for the
1787 * extent we are going to write, so we will enospc out there. We don't
1788 * need to start yet another transaction to update the inode as we will
1789 * update the inode when we finish writing whatever data we write.
1790 */
1791 update_time_for_write(inode);
1792
1793 start_pos = round_down(pos, root->sectorsize);
1794 if (start_pos > i_size_read(inode)) {
1795 /* Expand hole size to cover write data, preventing empty gap */
1796 end_pos = round_up(pos + count, root->sectorsize);
1797 err = btrfs_cont_expand(inode, i_size_read(inode), end_pos);
1798 if (err) {
1799 mutex_unlock(&inode->i_mutex);
1800 goto out;
1801 }
1802 }
1803
1804 if (sync)
1805 atomic_inc(&BTRFS_I(inode)->sync_writers);
1806
1807 if (unlikely(file->f_flags & O_DIRECT)) {
1808 num_written = __btrfs_direct_write(iocb, iov, nr_segs,
1809 pos, count, ocount);
1810 } else {
1811 struct iov_iter i;
1812
1813 iov_iter_init(&i, iov, nr_segs, count, num_written);
1814
1815 num_written = __btrfs_buffered_write(file, &i, pos);
1816 if (num_written > 0)
1817 iocb->ki_pos = pos + num_written;
1818 }
1819
1820 mutex_unlock(&inode->i_mutex);
1821
1822 /*
1823 * we want to make sure fsync finds this change
1824 * but we haven't joined a transaction running right now.
1825 *
1826 * Later on, someone is sure to update the inode and get the
1827 * real transid recorded.
1828 *
1829 * We set last_trans now to the fs_info generation + 1,
1830 * this will either be one more than the running transaction
1831 * or the generation used for the next transaction if there isn't
1832 * one running right now.
1833 *
1834 * We also have to set last_sub_trans to the current log transid,
1835 * otherwise subsequent syncs to a file that's been synced in this
1836 	 * transaction will appear to have already occurred.
1837 */
1838 BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
1839 BTRFS_I(inode)->last_sub_trans = root->log_transid;
1840 if (num_written > 0) {
1841 err = generic_write_sync(file, pos, num_written);
1842 if (err < 0)
1843 num_written = err;
1844 }
1845
1846 if (sync)
1847 atomic_dec(&BTRFS_I(inode)->sync_writers);
1848 out:
1849 current->backing_dev_info = NULL;
1850 return num_written ? num_written : err;
1851 }
1852
1853 int btrfs_release_file(struct inode *inode, struct file *filp)
1854 {
1855 /*
1856 	 * ordered_data_close is set by setattr when we are about to truncate
1857 * a file from a non-zero size to a zero size. This tries to
1858 * flush down new bytes that may have been written if the
1859 * application were using truncate to replace a file in place.
1860 */
1861 if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
1862 &BTRFS_I(inode)->runtime_flags)) {
1863 struct btrfs_trans_handle *trans;
1864 struct btrfs_root *root = BTRFS_I(inode)->root;
1865
1866 /*
1867 * We need to block on a committing transaction to keep us from
1868 		 * throwing an ordered operation onto the list and causing
1869 * something like sync to deadlock trying to flush out this
1870 * inode.
1871 */
1872 trans = btrfs_start_transaction(root, 0);
1873 if (IS_ERR(trans))
1874 return PTR_ERR(trans);
1875 btrfs_add_ordered_operation(trans, BTRFS_I(inode)->root, inode);
1876 btrfs_end_transaction(trans, root);
1877 if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
1878 filemap_flush(inode->i_mapping);
1879 }
1880 if (filp->private_data)
1881 btrfs_ioctl_trans_end(filp);
1882 return 0;
1883 }
1884
1885 /*
1886 * fsync call for both files and directories. This logs the inode into
1887 * the tree log instead of forcing full commits whenever possible.
1888 *
1889  * It needs to call filemap_fdatawait so that all ordered extent updates
1890  * in the metadata btree are up to date for copying to the log.
1891 *
1892 * It drops the inode mutex before doing the tree log commit. This is an
1893 * important optimization for directories because holding the mutex prevents
1894 * new operations on the dir while we write to disk.
1895 */
1896 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1897 {
1898 struct dentry *dentry = file->f_path.dentry;
1899 struct inode *inode = dentry->d_inode;
1900 struct btrfs_root *root = BTRFS_I(inode)->root;
1901 struct btrfs_trans_handle *trans;
1902 struct btrfs_log_ctx ctx;
1903 int ret = 0;
1904 	bool full_sync = false;
1905
1906 trace_btrfs_sync_file(file, datasync);
1907
1908 /*
1909 	 * We write the dirty pages in the range and wait until they complete
1910 	 * outside of the ->i_mutex, so the dirty pages can be flushed by
1911 	 * multiple tasks in parallel, improving performance. See
1912 	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
1913 */
1914 atomic_inc(&BTRFS_I(inode)->sync_writers);
1915 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
1916 if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1917 &BTRFS_I(inode)->runtime_flags))
1918 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
1919 atomic_dec(&BTRFS_I(inode)->sync_writers);
1920 if (ret)
1921 return ret;
1922
1923 mutex_lock(&inode->i_mutex);
1924
1925 /*
1926 	 * We flush the dirty pages again to avoid any dirty pages in the
1927 	 * range being left behind.
1928 */
1929 atomic_inc(&root->log_batch);
1930 full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1931 &BTRFS_I(inode)->runtime_flags);
1932 if (full_sync) {
1933 ret = btrfs_wait_ordered_range(inode, start, end - start + 1);
1934 if (ret) {
1935 mutex_unlock(&inode->i_mutex);
1936 goto out;
1937 }
1938 }
1939 atomic_inc(&root->log_batch);
1940
1941 /*
1942 * check the transaction that last modified this inode
1943 	 * and see if it's already been committed
1944 */
1945 if (!BTRFS_I(inode)->last_trans) {
1946 mutex_unlock(&inode->i_mutex);
1947 goto out;
1948 }
1949
1950 /*
1951 * if the last transaction that changed this file was before
1952 * the current transaction, we can bail out now without any
1953 * syncing
1954 */
1955 smp_mb();
1956 if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
1957 BTRFS_I(inode)->last_trans <=
1958 root->fs_info->last_trans_committed) {
1959 BTRFS_I(inode)->last_trans = 0;
1960
1961 /*
1962 		 * We've had everything committed since the last time we were
1963 		 * modified, so clear this flag in case it was set for whatever
1964 		 * reason; it's no longer relevant.
1965 */
1966 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1967 &BTRFS_I(inode)->runtime_flags);
1968 mutex_unlock(&inode->i_mutex);
1969 goto out;
1970 }
1971
1972 /*
1973 * Ok, we haven't committed the transaction yet; let's do a commit
1974 */
1975 if (file->private_data)
1976 btrfs_ioctl_trans_end(file);
1977
1978 /*
1979 * We use start here because we will need to wait on the IO to complete
1980 * in btrfs_sync_log, which could require joining a transaction (for
1981 * example checking cross references in the nocow path). If we use join
1982 * here we could get into a situation where we're waiting on IO to
1983 * happen that is blocked on a transaction trying to commit. With start
1984 * we inc the extwriter counter, so we wait for all extwriters to exit
1985 * before we start blocking join'ers. This comment is to keep somebody
1986 * from thinking they are super smart and changing this to
1987 * btrfs_join_transaction *cough*Josef*cough*.
1988 */
1989 trans = btrfs_start_transaction(root, 0);
1990 if (IS_ERR(trans)) {
1991 ret = PTR_ERR(trans);
1992 mutex_unlock(&inode->i_mutex);
1993 goto out;
1994 }
1995 trans->sync = true;
1996
1997 btrfs_init_log_ctx(&ctx);
1998
1999 ret = btrfs_log_dentry_safe(trans, root, dentry, &ctx);
2000 if (ret < 0) {
2001 /* Fallthrough and commit/free transaction. */
2002 ret = 1;
2003 }
2004
2005 /* we've logged all the items and now have a consistent
2006 * version of the file in the log. It is possible that
2007 * someone will come in and modify the file, but that's
2008 * fine because the log is consistent on disk, and we
2009 * have references to all of the file's extents
2010 *
2011 * It is possible that someone will come in and log the
2012 * file again, but that will end up using the synchronization
2013 * inside btrfs_sync_log to keep things safe.
2014 */
2015 mutex_unlock(&inode->i_mutex);
2016
2017 if (ret != BTRFS_NO_LOG_SYNC) {
2018 if (!ret) {
2019 ret = btrfs_sync_log(trans, root, &ctx);
2020 if (!ret) {
2021 ret = btrfs_end_transaction(trans, root);
2022 goto out;
2023 }
2024 }
2025 if (!full_sync) {
2026 ret = btrfs_wait_ordered_range(inode, start,
2027 end - start + 1);
2028 if (ret)
2029 goto out;
2030 }
2031 ret = btrfs_commit_transaction(trans, root);
2032 } else {
2033 ret = btrfs_end_transaction(trans, root);
2034 }
2035 out:
2036 return ret > 0 ? -EIO : ret;
2037 }
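
/*
 * Editorial annotation, not part of the original file: a minimal sketch
 * of how this handler is reached. btrfs_sync_file is wired up as ->fsync
 * in btrfs_file_operations below, so the VFS calls it for fsync(2) and
 * fdatasync(2), roughly:
 *
 *	fd = open("/mnt/btrfs/file", O_WRONLY);
 *	write(fd, buf, len);
 *	fsync(fd);	datasync == 0, metadata included
 *	fdatasync(fd);	same entry point, datasync == 1
 *
 * On success the data and the metadata needed to find it are durable,
 * either via the tree log (fast path) or a full transaction commit.
 */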
2038
2039 static const struct vm_operations_struct btrfs_file_vm_ops = {
2040 .fault = filemap_fault,
2041 .map_pages = filemap_map_pages,
2042 .page_mkwrite = btrfs_page_mkwrite,
2043 .remap_pages = generic_file_remap_pages,
2044 };
2045
2046 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
2047 {
2048 struct address_space *mapping = filp->f_mapping;
2049
2050 if (!mapping->a_ops->readpage)
2051 return -ENOEXEC;
2052
2053 file_accessed(filp);
2054 vma->vm_ops = &btrfs_file_vm_ops;
2055
2056 return 0;
2057 }
2058
2059 static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
2060 int slot, u64 start, u64 end)
2061 {
2062 struct btrfs_file_extent_item *fi;
2063 struct btrfs_key key;
2064
2065 if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2066 return 0;
2067
2068 btrfs_item_key_to_cpu(leaf, &key, slot);
2069 if (key.objectid != btrfs_ino(inode) ||
2070 key.type != BTRFS_EXTENT_DATA_KEY)
2071 return 0;
2072
2073 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2074
2075 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2076 return 0;
2077
2078 if (btrfs_file_extent_disk_bytenr(leaf, fi))
2079 return 0;
2080
2081 if (key.offset == end)
2082 return 1;
2083 if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2084 return 1;
2085 return 0;
2086 }
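
/*
 * Editorial annotation, not in the original file: a worked example of
 * the checks above. With start == 8192 and end == 12288, a neighbouring
 * hole extent item (a regular extent whose disk_bytenr is 0) is
 * mergeable if it either begins exactly at end (key.offset == 12288, so
 * it can be extended backwards over the new hole) or ends exactly at
 * start (key.offset + num_bytes == 8192, so it can be extended
 * forwards); anything else returns 0.
 */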
2087
2088 static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
2089 struct btrfs_path *path, u64 offset, u64 end)
2090 {
2091 struct btrfs_root *root = BTRFS_I(inode)->root;
2092 struct extent_buffer *leaf;
2093 struct btrfs_file_extent_item *fi;
2094 struct extent_map *hole_em;
2095 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2096 struct btrfs_key key;
2097 int ret;
2098
2099 if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
2100 goto out;
2101
2102 key.objectid = btrfs_ino(inode);
2103 key.type = BTRFS_EXTENT_DATA_KEY;
2104 key.offset = offset;
2105
2106 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2107 if (ret < 0)
2108 return ret;
2109 BUG_ON(!ret);
2110
2111 leaf = path->nodes[0];
2112 if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2113 u64 num_bytes;
2114
2115 path->slots[0]--;
2116 fi = btrfs_item_ptr(leaf, path->slots[0],
2117 struct btrfs_file_extent_item);
2118 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2119 end - offset;
2120 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2121 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2122 btrfs_set_file_extent_offset(leaf, fi, 0);
2123 btrfs_mark_buffer_dirty(leaf);
2124 goto out;
2125 }
2126
2127 if (hole_mergeable(inode, leaf, path->slots[0] + 1, offset, end)) {
2128 u64 num_bytes;
2129
2130 path->slots[0]++;
2131 key.offset = offset;
2132 btrfs_set_item_key_safe(root, path, &key);
2133 fi = btrfs_item_ptr(leaf, path->slots[0],
2134 struct btrfs_file_extent_item);
2135 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2136 offset;
2137 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2138 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2139 btrfs_set_file_extent_offset(leaf, fi, 0);
2140 btrfs_mark_buffer_dirty(leaf);
2141 goto out;
2142 }
2143 btrfs_release_path(path);
2144
2145 ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
2146 0, 0, end - offset, 0, end - offset,
2147 0, 0, 0);
2148 if (ret)
2149 return ret;
2150
2151 out:
2152 btrfs_release_path(path);
2153
2154 hole_em = alloc_extent_map();
2155 if (!hole_em) {
2156 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2157 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2158 &BTRFS_I(inode)->runtime_flags);
2159 } else {
2160 hole_em->start = offset;
2161 hole_em->len = end - offset;
2162 hole_em->ram_bytes = hole_em->len;
2163 hole_em->orig_start = offset;
2164
2165 hole_em->block_start = EXTENT_MAP_HOLE;
2166 hole_em->block_len = 0;
2167 hole_em->orig_block_len = 0;
2168 hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
2169 hole_em->compress_type = BTRFS_COMPRESS_NONE;
2170 hole_em->generation = trans->transid;
2171
2172 do {
2173 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2174 write_lock(&em_tree->lock);
2175 ret = add_extent_mapping(em_tree, hole_em, 1);
2176 write_unlock(&em_tree->lock);
2177 } while (ret == -EEXIST);
2178 free_extent_map(hole_em);
2179 if (ret)
2180 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2181 &BTRFS_I(inode)->runtime_flags);
2182 }
2183
2184 return 0;
2185 }
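
/*
 * Editorial annotation, not in the original file: e.g. punching out
 * [4096, 8192) next to an existing 4096-byte hole extent at offset 0
 * takes the first merge branch above: num_bytes becomes
 * 4096 + (8192 - 4096) == 8192, leaving one hole extent item covering
 * [0, 8192) instead of two adjacent ones.
 */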
2186
2187 /*
2188 * Find a hole extent on the given inode and change start/len to the end
2189 * of the hole extent (a hole/vacuum extent whose em->start <= start &&
2190 * em->start + em->len > start).
2191 * When a hole extent is found, return 1 and modify start/len accordingly.
2192 */
2193 static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
2194 {
2195 struct extent_map *em;
2196 int ret = 0;
2197
2198 em = btrfs_get_extent(inode, NULL, 0, *start, *len, 0);
2199 if (IS_ERR_OR_NULL(em)) {
2200 if (!em)
2201 ret = -ENOMEM;
2202 else
2203 ret = PTR_ERR(em);
2204 return ret;
2205 }
2206
2207 /* Hole or vacuum extent (the latter only exists in no-holes mode) */
2208 if (em->block_start == EXTENT_MAP_HOLE) {
2209 ret = 1;
2210 *len = em->start + em->len > *start + *len ?
2211 0 : *start + *len - em->start - em->len;
2212 *start = em->start + em->len;
2213 }
2214 free_extent_map(em);
2215 return ret;
2216 }
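
/*
 * Editorial annotation, not in the original file: a worked example of
 * the arithmetic above. With *start == 3000 and *len == 10000, a hole
 * extent covering [0, 8192) makes this return 1 with *start == 8192 and
 * *len == 4808 (3000 + 10000 - 8192). If the hole covers the whole
 * requested range, *len becomes 0 and callers treat the range as done.
 */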
2217
2218 static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2219 {
2220 struct btrfs_root *root = BTRFS_I(inode)->root;
2221 struct extent_state *cached_state = NULL;
2222 struct btrfs_path *path;
2223 struct btrfs_block_rsv *rsv;
2224 struct btrfs_trans_handle *trans;
2225 u64 lockstart;
2226 u64 lockend;
2227 u64 tail_start;
2228 u64 tail_len;
2229 u64 orig_start = offset;
2230 u64 cur_offset;
2231 u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
2232 u64 drop_end;
2233 int ret = 0;
2234 int err = 0;
2235 int rsv_count;
2236 bool same_page;
2237 bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
2238 u64 ino_size;
2239
2240 ret = btrfs_wait_ordered_range(inode, offset, len);
2241 if (ret)
2242 return ret;
2243
2244 mutex_lock(&inode->i_mutex);
2245 ino_size = round_up(inode->i_size, PAGE_CACHE_SIZE);
2246 ret = find_first_non_hole(inode, &offset, &len);
2247 if (ret < 0)
2248 goto out_only_mutex;
2249 if (ret && !len) {
2250 /* Already in a large hole */
2251 ret = 0;
2252 goto out_only_mutex;
2253 }
2254
2255 lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
2256 lockend = round_down(offset + len,
2257 BTRFS_I(inode)->root->sectorsize) - 1;
2258 same_page = ((offset >> PAGE_CACHE_SHIFT) ==
2259 ((offset + len - 1) >> PAGE_CACHE_SHIFT));
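
/*
 * Editorial annotation, not in the original file: assuming a 4096-byte
 * sectorsize, offset == 3000 and len == 10000 give lockstart == 4096
 * and lockend == 12287 (round_down(13000, 4096) - 1). The unaligned
 * head [3000, 4096) and tail [12288, 13000) fall outside the lock
 * range and are zeroed page by page below instead.
 */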
2260
2261 /*
2262 * We needn't truncate any page which is beyond the end of the file,
2263 * because we are sure there is no data there.
2264 */
2265 /*
2266 * Only take this short cut if we are within the same page and we
2267 * aren't punching out the entire page.
2268 */
2269 if (same_page && len < PAGE_CACHE_SIZE) {
2270 if (offset < ino_size)
2271 ret = btrfs_truncate_page(inode, offset, len, 0);
2272 goto out_only_mutex;
2273 }
2274
2275 /* zero back part of the first page */
2276 if (offset < ino_size) {
2277 ret = btrfs_truncate_page(inode, offset, 0, 0);
2278 if (ret) {
2279 mutex_unlock(&inode->i_mutex);
2280 return ret;
2281 }
2282 }
2283
2284 /* Check the aligned pages after the first unaligned page.
2285 * If offset != orig_start, the first unaligned page and
2286 * several following pages are already in holes, so the
2287 * extra check can be skipped. */
2288 if (offset == orig_start) {
2289 /* After truncating the page, check for holes again */
2290 len = offset + len - lockstart;
2291 offset = lockstart;
2292 ret = find_first_non_hole(inode, &offset, &len);
2293 if (ret < 0)
2294 goto out_only_mutex;
2295 if (ret && !len) {
2296 ret = 0;
2297 goto out_only_mutex;
2298 }
2299 lockstart = offset;
2300 }
2301
2302 /* Check whether the unaligned tail part is in a hole */
2303 tail_start = lockend + 1;
2304 tail_len = offset + len - tail_start;
2305 if (tail_len) {
2306 ret = find_first_non_hole(inode, &tail_start, &tail_len);
2307 if (unlikely(ret < 0))
2308 goto out_only_mutex;
2309 if (!ret) {
2310 /* zero the front end of the last page */
2311 if (tail_start + tail_len < ino_size) {
2312 ret = btrfs_truncate_page(inode,
2313 tail_start + tail_len, 0, 1);
2314 if (ret)
2315 goto out_only_mutex;
2316 }
2317 }
2318 }
2319
2320 if (lockend < lockstart) {
2321 mutex_unlock(&inode->i_mutex);
2322 return 0;
2323 }
2324
2325 while (1) {
2326 struct btrfs_ordered_extent *ordered;
2327
2328 truncate_pagecache_range(inode, lockstart, lockend);
2329
2330 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2331 0, &cached_state);
2332 ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
2333
2334 /*
2335 * We need to make sure we have no ordered extents in this range
2336 * and nobody raced in and read a page in this range, if we did
2337 * we need to try again.
2338 */
2339 if ((!ordered ||
2340 (ordered->file_offset + ordered->len <= lockstart ||
2341 ordered->file_offset > lockend)) &&
2342 !btrfs_page_exists_in_range(inode, lockstart, lockend)) {
2343 if (ordered)
2344 btrfs_put_ordered_extent(ordered);
2345 break;
2346 }
2347 if (ordered)
2348 btrfs_put_ordered_extent(ordered);
2349 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2350 lockend, &cached_state, GFP_NOFS);
2351 ret = btrfs_wait_ordered_range(inode, lockstart,
2352 lockend - lockstart + 1);
2353 if (ret) {
2354 mutex_unlock(&inode->i_mutex);
2355 return ret;
2356 }
2357 }
2358
2359 path = btrfs_alloc_path();
2360 if (!path) {
2361 ret = -ENOMEM;
2362 goto out;
2363 }
2364
2365 rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
2366 if (!rsv) {
2367 ret = -ENOMEM;
2368 goto out_free;
2369 }
2370 rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
2371 rsv->failfast = 1;
2372
2373 /*
2374 * 1 - update the inode
2375 * 1 - removing the extents in the range
2376 * 1 - adding the hole extent if no_holes isn't set
2377 */
2378 rsv_count = no_holes ? 2 : 3;
2379 trans = btrfs_start_transaction(root, rsv_count);
2380 if (IS_ERR(trans)) {
2381 err = PTR_ERR(trans);
2382 goto out_free;
2383 }
2384
2385 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
2386 min_size);
2387 BUG_ON(ret);
2388 trans->block_rsv = rsv;
2389
2390 cur_offset = lockstart;
2391 len = lockend - cur_offset;
2392 while (cur_offset < lockend) {
2393 ret = __btrfs_drop_extents(trans, root, inode, path,
2394 cur_offset, lockend + 1,
2395 &drop_end, 1, 0, 0, NULL);
2396 if (ret != -ENOSPC)
2397 break;
2398
2399 trans->block_rsv = &root->fs_info->trans_block_rsv;
2400
2401 if (cur_offset < ino_size) {
2402 ret = fill_holes(trans, inode, path, cur_offset,
2403 drop_end);
2404 if (ret) {
2405 err = ret;
2406 break;
2407 }
2408 }
2409
2410 cur_offset = drop_end;
2411
2412 ret = btrfs_update_inode(trans, root, inode);
2413 if (ret) {
2414 err = ret;
2415 break;
2416 }
2417
2418 btrfs_end_transaction(trans, root);
2419 btrfs_btree_balance_dirty(root);
2420
2421 trans = btrfs_start_transaction(root, rsv_count);
2422 if (IS_ERR(trans)) {
2423 ret = PTR_ERR(trans);
2424 trans = NULL;
2425 break;
2426 }
2427
2428 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
2429 rsv, min_size);
2430 BUG_ON(ret); /* shouldn't happen */
2431 trans->block_rsv = rsv;
2432
2433 ret = find_first_non_hole(inode, &cur_offset, &len);
2434 if (unlikely(ret < 0))
2435 break;
2436 if (ret && !len) {
2437 ret = 0;
2438 break;
2439 }
2440 }
2441
2442 if (ret) {
2443 err = ret;
2444 goto out_trans;
2445 }
2446
2447 trans->block_rsv = &root->fs_info->trans_block_rsv;
2448 /*
2449 * Don't insert a file hole extent item if it's for a range beyond eof
2450 * (because it's useless) or if it represents a zero-byte range (when
2451 * cur_offset == drop_end).
2452 */
2453 if (cur_offset < ino_size && cur_offset < drop_end) {
2454 ret = fill_holes(trans, inode, path, cur_offset, drop_end);
2455 if (ret) {
2456 err = ret;
2457 goto out_trans;
2458 }
2459 }
2460
2461 out_trans:
2462 if (!trans)
2463 goto out_free;
2464
2465 inode_inc_iversion(inode);
2466 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
2467
2468 trans->block_rsv = &root->fs_info->trans_block_rsv;
2469 ret = btrfs_update_inode(trans, root, inode);
2470 btrfs_end_transaction(trans, root);
2471 btrfs_btree_balance_dirty(root);
2472 out_free:
2473 btrfs_free_path(path);
2474 btrfs_free_block_rsv(root, rsv);
2475 out:
2476 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2477 &cached_state, GFP_NOFS);
2478 out_only_mutex:
2479 mutex_unlock(&inode->i_mutex);
2480 if (ret && !err)
2481 err = ret;
2482 return err;
2483 }
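
/*
 * Editorial annotation, not part of the original file: the punch-hole
 * path above is reached through fallocate(2) with FALLOC_FL_PUNCH_HOLE,
 * which the VFS requires to be combined with FALLOC_FL_KEEP_SIZE:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, len);
 *
 * Afterwards reads in [offset, offset + len) return zeroes, and the
 * dropped extents are freed once nothing else references them.
 */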
2484
2485 static long btrfs_fallocate(struct file *file, int mode,
2486 loff_t offset, loff_t len)
2487 {
2488 struct inode *inode = file_inode(file);
2489 struct extent_state *cached_state = NULL;
2490 struct btrfs_root *root = BTRFS_I(inode)->root;
2491 u64 cur_offset;
2492 u64 last_byte;
2493 u64 alloc_start;
2494 u64 alloc_end;
2495 u64 alloc_hint = 0;
2496 u64 locked_end;
2497 struct extent_map *em;
2498 int blocksize = BTRFS_I(inode)->root->sectorsize;
2499 int ret;
2500
2501 alloc_start = round_down(offset, blocksize);
2502 alloc_end = round_up(offset + len, blocksize);
2503
2504 /* Make sure we aren't being given some crap mode */
2505 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2506 return -EOPNOTSUPP;
2507
2508 if (mode & FALLOC_FL_PUNCH_HOLE)
2509 return btrfs_punch_hole(inode, offset, len);
2510
2511 /*
2512 * Make sure we have enough space before we do the
2513 * allocation.
2514 */
2515 ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
2516 if (ret)
2517 return ret;
2518 if (root->fs_info->quota_enabled) {
2519 ret = btrfs_qgroup_reserve(root, alloc_end - alloc_start);
2520 if (ret)
2521 goto out_reserve_fail;
2522 }
2523
2524 mutex_lock(&inode->i_mutex);
2525 ret = inode_newsize_ok(inode, alloc_end);
2526 if (ret)
2527 goto out;
2528
2529 if (alloc_start > inode->i_size) {
2530 ret = btrfs_cont_expand(inode, i_size_read(inode),
2531 alloc_start);
2532 if (ret)
2533 goto out;
2534 } else {
2535 /*
2536 * If we are fallocating from the end of the file onward we
2537 * need to zero out the end of the page if i_size lands in the
2538 * middle of a page.
2539 */
2540 ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
2541 if (ret)
2542 goto out;
2543 }
2544
2545 /*
2546 * Wait for ordered IO before we take any locks. We'll loop again
2547 * below with the locks held.
2548 */
2549 ret = btrfs_wait_ordered_range(inode, alloc_start,
2550 alloc_end - alloc_start);
2551 if (ret)
2552 goto out;
2553
2554 locked_end = alloc_end - 1;
2555 while (1) {
2556 struct btrfs_ordered_extent *ordered;
2557
2558 /* the extent lock is ordered inside the running
2559 * transaction
2560 */
2561 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
2562 locked_end, 0, &cached_state);
2563 ordered = btrfs_lookup_first_ordered_extent(inode,
2564 alloc_end - 1);
2565 if (ordered &&
2566 ordered->file_offset + ordered->len > alloc_start &&
2567 ordered->file_offset < alloc_end) {
2568 btrfs_put_ordered_extent(ordered);
2569 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
2570 alloc_start, locked_end,
2571 &cached_state, GFP_NOFS);
2572 /*
2573 * we can't wait on the range with the transaction
2574 * running or with the extent lock held
2575 */
2576 ret = btrfs_wait_ordered_range(inode, alloc_start,
2577 alloc_end - alloc_start);
2578 if (ret)
2579 goto out;
2580 } else {
2581 if (ordered)
2582 btrfs_put_ordered_extent(ordered);
2583 break;
2584 }
2585 }
2586
2587 cur_offset = alloc_start;
2588 while (1) {
2589 u64 actual_end;
2590
2591 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2592 alloc_end - cur_offset, 0);
2593 if (IS_ERR_OR_NULL(em)) {
2594 if (!em)
2595 ret = -ENOMEM;
2596 else
2597 ret = PTR_ERR(em);
2598 break;
2599 }
2600 last_byte = min(extent_map_end(em), alloc_end);
2601 actual_end = min_t(u64, extent_map_end(em), offset + len);
2602 last_byte = ALIGN(last_byte, blocksize);
2603
2604 if (em->block_start == EXTENT_MAP_HOLE ||
2605 (cur_offset >= inode->i_size &&
2606 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
2607 ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
2608 last_byte - cur_offset,
2609 1 << inode->i_blkbits,
2610 offset + len,
2611 &alloc_hint);
2612
2613 if (ret < 0) {
2614 free_extent_map(em);
2615 break;
2616 }
2617 } else if (actual_end > inode->i_size &&
2618 !(mode & FALLOC_FL_KEEP_SIZE)) {
2619 /*
2620 * We didn't need to allocate any more space, but we
2621 * still extended the size of the file so we need to
2622 * update i_size.
2623 */
2624 inode->i_ctime = CURRENT_TIME;
2625 i_size_write(inode, actual_end);
2626 btrfs_ordered_update_i_size(inode, actual_end, NULL);
2627 }
2628 free_extent_map(em);
2629
2630 cur_offset = last_byte;
2631 if (cur_offset >= alloc_end) {
2632 ret = 0;
2633 break;
2634 }
2635 }
2636 unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
2637 &cached_state, GFP_NOFS);
2638 out:
2639 mutex_unlock(&inode->i_mutex);
2640 if (root->fs_info->quota_enabled)
2641 btrfs_qgroup_free(root, alloc_end - alloc_start);
2642 out_reserve_fail:
2643 /* Let go of our reservation. */
2644 btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
2645 return ret;
2646 }
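
/*
 * Editorial annotation, not part of the original file: plain
 * preallocation goes through the loop above, for example:
 *
 *	fallocate(fd, 0, 0, 1 << 20);
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 *
 * Both reserve PREALLOC extents so later writes in the range cannot
 * fail with ENOSPC; only the first call moves i_size (and ctime) when
 * the file was previously smaller than 1 MiB, via the KEEP_SIZE check
 * in the loop.
 */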
2647
2648 static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
2649 {
2650 struct btrfs_root *root = BTRFS_I(inode)->root;
2651 struct extent_map *em = NULL;
2652 struct extent_state *cached_state = NULL;
2653 u64 lockstart = *offset;
2654 u64 lockend = i_size_read(inode);
2655 u64 start = *offset;
2656 u64 len = i_size_read(inode);
2657 int ret = 0;
2658
2659 lockend = max_t(u64, root->sectorsize, lockend);
2660 if (lockend <= lockstart)
2661 lockend = lockstart + root->sectorsize;
2662
2663 lockend--;
2664 len = lockend - lockstart + 1;
2665
2666 len = max_t(u64, len, root->sectorsize);
2667 if (inode->i_size == 0)
2668 return -ENXIO;
2669
2670 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
2671 &cached_state);
2672
2673 while (start < inode->i_size) {
2674 em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
2675 if (IS_ERR(em)) {
2676 ret = PTR_ERR(em);
2677 em = NULL;
2678 break;
2679 }
2680
2681 if (whence == SEEK_HOLE &&
2682 (em->block_start == EXTENT_MAP_HOLE ||
2683 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
2684 break;
2685 else if (whence == SEEK_DATA &&
2686 (em->block_start != EXTENT_MAP_HOLE &&
2687 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
2688 break;
2689
2690 start = em->start + em->len;
2691 free_extent_map(em);
2692 em = NULL;
2693 cond_resched();
2694 }
2695 free_extent_map(em);
2696 if (!ret) {
2697 if (whence == SEEK_DATA && start >= inode->i_size)
2698 ret = -ENXIO;
2699 else
2700 *offset = min_t(loff_t, start, inode->i_size);
2701 }
2702 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2703 &cached_state, GFP_NOFS);
2704 return ret;
2705 }
2706
2707 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
2708 {
2709 struct inode *inode = file->f_mapping->host;
2710 int ret;
2711
2712 mutex_lock(&inode->i_mutex);
2713 switch (whence) {
2714 case SEEK_END:
2715 case SEEK_CUR:
2716 offset = generic_file_llseek(file, offset, whence);
2717 goto out;
2718 case SEEK_DATA:
2719 case SEEK_HOLE:
2720 if (offset >= i_size_read(inode)) {
2721 mutex_unlock(&inode->i_mutex);
2722 return -ENXIO;
2723 }
2724
2725 ret = find_desired_extent(inode, &offset, whence);
2726 if (ret) {
2727 mutex_unlock(&inode->i_mutex);
2728 return ret;
2729 }
2730 }
2731
2732 offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
2733 out:
2734 mutex_unlock(&inode->i_mutex);
2735 return offset;
2736 }
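
/*
 * Editorial annotation, not part of the original file: the SEEK_DATA
 * and SEEK_HOLE cases above back lseek(2) hole detection, e.g.:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 *
 * finds the first byte of data and the end of that data run. Offsets at
 * or beyond i_size yield -ENXIO, matching the checks in both
 * btrfs_file_llseek and find_desired_extent; preallocated (unwritten)
 * extents are reported as holes.
 */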
2737
2738 const struct file_operations btrfs_file_operations = {
2739 .llseek = btrfs_file_llseek,
2740 .read = do_sync_read,
2741 .write = do_sync_write,
2742 .aio_read = generic_file_aio_read,
2743 .splice_read = generic_file_splice_read,
2744 .aio_write = btrfs_file_aio_write,
2745 .mmap = btrfs_file_mmap,
2746 .open = generic_file_open,
2747 .release = btrfs_release_file,
2748 .fsync = btrfs_sync_file,
2749 .fallocate = btrfs_fallocate,
2750 .unlocked_ioctl = btrfs_ioctl,
2751 #ifdef CONFIG_COMPAT
2752 .compat_ioctl = btrfs_ioctl,
2753 #endif
2754 };
2755
2756 void btrfs_auto_defrag_exit(void)
2757 {
2758 if (btrfs_inode_defrag_cachep)
2759 kmem_cache_destroy(btrfs_inode_defrag_cachep);
2760 }
2761
2762 int btrfs_auto_defrag_init(void)
2763 {
2764 btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
2765 sizeof(struct inode_defrag), 0,
2766 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2767 NULL);
2768 if (!btrfs_inode_defrag_cachep)
2769 return -ENOMEM;
2770
2771 return 0;
2772 }