/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/list_sort.h>
#include "transaction.h"
#include "print-tree.h"

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * rename foo/some_dir foo2/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  With the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 */
/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY 0
#define LOG_WALK_REPLAY_INODES 1
#define LOG_WALK_REPLAY_ALL 2
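/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * the stages above are assumed to be applied one after another by driving
 * walk_log_tree() with a walk_control whose stage field is bumped each pass.
 * Per the comments later in this file, process_one_buffer() handles the pin
 * pass and replay_one_buffer() handles the replay passes; the exact recovery
 * caller is not shown here, so treat the loop below as an assumption:
 *
 *	struct walk_control wc = {
 *		.trans = trans,
 *		.process_func = process_one_buffer,	// pin pass
 *		.stage = LOG_WALK_PIN_ONLY,
 *	};
 *
 *	for (; wc.stage <= LOG_WALK_REPLAY_ALL; wc.stage++) {
 *		if (wc.stage != LOG_WALK_PIN_ONLY)
 *			wc.process_func = replay_one_buffer;
 *		ret = walk_log_tree(trans, log, &wc);
 *		if (ret)
 *			break;
 *	}
 */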
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct inode *inode,
			   int inode_only);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in RAM, once to create all the inodes logged in the tree
 * and once to do all the other items.
 */
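/*
 * Illustrative call pattern (sketch, not from this file): the fsync fast path
 * described above is assumed to copy the changed items into the per-subvolume
 * log tree with btrfs_log_inode() under start_log_trans()/btrfs_end_log_trans(),
 * and then push the log to disk with btrfs_sync_log().  The real caller lives
 * in the fsync code and differs in detail, so treat names, arguments and
 * error handling here as assumptions:
 *
 *	ret = start_log_trans(trans, root);
 *	if (!ret)
 *		ret = btrfs_log_inode(trans, root, inode, LOG_INODE_ALL);
 *	btrfs_end_log_trans(root);
 *	if (!ret)
 *		ret = btrfs_sync_log(trans, root);
 */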
/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root)
	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		if (!root->log_start_pid) {
			root->log_start_pid = current->pid;
			root->log_multiple_pids = false;
		} else if (root->log_start_pid != current->pid) {
			root->log_multiple_pids = true;
		atomic_inc(&root->log_batch);
		atomic_inc(&root->log_writers);
		mutex_unlock(&root->log_mutex);
	root->log_multiple_pids = false;
	root->log_start_pid = current->pid;
	mutex_lock(&root->fs_info->tree_log_mutex);
	if (!root->fs_info->log_root_tree) {
		ret = btrfs_init_log_root_tree(trans, root->fs_info);
	if (err == 0 && !root->log_root) {
		ret = btrfs_add_log_tree(trans, root);
	mutex_unlock(&root->fs_info->tree_log_mutex);
	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there were no transactions
 */
static int join_running_log_trans(struct btrfs_root *root)
	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
int btrfs_pin_log_trans(struct btrfs_root *root)
	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);

/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
	if (atomic_dec_and_test(&root->log_writers)) {
		if (waitqueue_active(&root->log_writer_wait))
			wake_up(&root->log_writer_wait);
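/*
 * Illustrative pairing (sketch, not from this file): per the comment above,
 * callers that must keep a log sync from running while they update the tree,
 * e.g. the unlink/rename paths, are expected to bracket that work with the
 * two helpers above:
 *
 *	btrfs_pin_log_trans(root);
 *	... unlink/rename work that must not race with a log sync ...
 *	btrfs_end_log_trans(root);
 */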
/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done? This is used
	 * at transaction commit time while freeing a log tree
	 */

	/* should we write out the extent buffer? This is used
	 * while flushing the log tree to disk during a sync
	 */

	/* should we wait for the extent buffer io to finish? Also used
	 * while flushing the log tree to disk for a sync
	 */

	/* pin only walk, we record which extents on disk belong to the
	 */

	/* what stage of the replay code we're currently in */

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen);
/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen)
	btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
	if (btrfs_buffer_uptodate(eb, gen, 0)) {
		btrfs_write_tree_block(eb);
		btrfs_wait_tree_block_writeback(eb);
/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
		if (dst_size != item_size)
		if (item_size == 0) {
			btrfs_release_path(path);
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
		read_extent_buffer(eb, src_copy, src_ptr, item_size);
		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
		ret = memcmp(dst_copy, src_copy, item_size);

		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 */
			btrfs_release_path(path);
	btrfs_release_path(path);

	/* try to insert the key into the destination tree */
	ret = btrfs_insert_empty_item(trans, root, path,

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST) {
		found_size = btrfs_item_size_nr(path->nodes[0],
		if (found_size > item_size)
			btrfs_truncate_item(trans, root, path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(trans, root, path,
					  item_size - found_size);
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0)
		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			saved_i_size = btrfs_inode_size(path->nodes[0],

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
	struct btrfs_key key;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	} else if (is_bad_inode(inode)) {

/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
	u64 start = key->offset;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC)
		extent_end = start + btrfs_file_extent_num_bytes(eb, item);
	else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_inline_len(eb, item);
		extent_end = ALIGN(start + size, root->sectorsize);

	inode = read_one_inode(root, key->objectid);

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);
		read_extent_buffer(eb, &cmp1, (unsigned long)item,
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
	btrfs_release_path(path);

	saved_nbytes = inode_get_bytes(inode);
	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		unsigned long dest_offset;
		struct btrfs_key ins;

		ret = btrfs_insert_empty_item(trans, root, path, key,
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		if (ins.objectid > 0) {
			LIST_HEAD(ordered_sums);
			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_extent(root, ins.objectid,
				ret = btrfs_inc_extent_ref(trans, root,
						ins.objectid, ins.offset,
						0, root->root_key.objectid,
						key->objectid, offset, 0);
				/*
				 * insert the extent pointer in the extent
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root, root->root_key.objectid,
						key->objectid, offset, &ins);
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);

			ret = btrfs_lookup_csums_range(root->log_root,
						       csum_start, csum_end - 1,
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						  struct btrfs_ordered_sum,
				ret = btrfs_csum_file_blocks(trans,
						root->fs_info->csum_root,
				list_del(&sums->list);
		btrfs_release_path(path);
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);

	inode_set_bytes(inode, saved_nbytes);
	ret = btrfs_update_inode(trans, root, inode);
/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_dir_item *di)
	struct extent_buffer *leaf;
	struct btrfs_key location;

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
	btrfs_run_delayed_items(trans, root);
/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
	struct btrfs_dir_item *di;
	struct btrfs_key location;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
	btrfs_release_path(path);
/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   char *name, int namelen)
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr_end;
	unsigned long name_ptr;

	path = btrfs_alloc_path();
	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		if (btrfs_find_name_in_ext_backref(path, ref_objectid,
						   name, namelen, NULL))
	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
		ptr = (unsigned long)(ref + 1) + found_name_len;
	btrfs_free_path(path);
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct inode *dir, struct inode *inode,
				  struct extent_buffer *eb,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			BUG_ON(!victim_name);

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),

			if (!backref_in_log(log_root, &search_key,
				btrfs_inc_nlink(inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir,
				btrfs_run_delayed_items(trans, root);
			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
	if (!IS_ERR_OR_NULL(extref)) {
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)base + cur_offset;

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,
				victim_parent = read_one_inode(root,
					btrfs_inc_nlink(inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
					btrfs_run_delayed_items(trans, root);
			cur_offset += victim_name_len + sizeof(*extref);
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
	btrfs_release_path(path);
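/*
 * Pull the name, index and parent objectid out of an extended back reference
 * item at 'ref_ptr'.  The name buffer is kmalloc'ed here and must be freed by
 * the caller.  (Descriptive comment added; the helpers below had none.)
 */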
static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	read_extent_buffer(eb, *name, (unsigned long)&extref->name,

	*index = btrfs_inode_extref_index(eb, extref);
	*parent_objectid = btrfs_inode_extref_parent(eb, extref);

static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	*index = btrfs_inode_ref_index(eb, ref);
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
	struct inode *inode;
	unsigned long ref_ptr;
	unsigned long ref_end;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 */
	dir = read_one_inode(root, parent_objectid);
	inode = read_one_inode(root, inode_objectid);

	while (ref_ptr < ref_end) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 */
				dir = read_one_inode(root, parent_objectid);
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
				  ref_index, name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */
				ret = __add_inode_ref(trans, root, path, log,
						      ref_index, name, namelen,

			/* insert our name */
			ret = btrfs_add_link(trans, dir, inode, name, namelen,

			btrfs_update_inode(trans, root, inode);

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
	btrfs_release_path(path);
static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 offset)
	ret = btrfs_find_orphan_item(root, offset);
		ret = btrfs_insert_orphan_item(trans, root, offset);
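/*
 * Count how many names the extended back reference items for this inode
 * carry.  Together with count_inode_refs() below, this is what
 * fixup_inode_link_count() uses to recompute i_nlink after replay.
 * (Descriptive comment added; the function below had none.)
 */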
static int count_inode_extrefs(struct btrfs_root *root,
			       struct inode *inode, struct btrfs_path *path)
	unsigned int nlink = 0;
	u64 inode_objectid = btrfs_ino(inode);
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);
			cur_offset += name_len + sizeof(*extref);

		btrfs_release_path(path);
	btrfs_release_path(path);
static int count_inode_refs(struct btrfs_root *root,
			    struct inode *inode, struct btrfs_path *path)
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr_end;
	u64 ino = btrfs_ino(inode);

	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
			if (path->slots[0] == 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key,
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
			ptr = (unsigned long)(ref + 1) + name_len;
		if (key.offset == 0)
		btrfs_release_path(path);
	btrfs_release_path(path);
/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
	struct btrfs_path *path;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();

	ret = count_inode_refs(root, inode, path);
	ret = count_inode_extrefs(root, inode, path);

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
		ret = insert_orphan_item(trans, root, ino);

	btrfs_free_path(path);
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

			if (path->slots[0] == 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)

		ret = btrfs_del_item(trans, root, path);
		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);

		ret = fixup_inode_link_count(trans, root, inode);

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 */
		key.offset = (u64)-1;
	btrfs_release_path(path);
/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
	struct btrfs_key key;
	struct inode *inode;

	inode = read_one_inode(root, objectid);

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
		btrfs_inc_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    u64 dirid, u64 index,
				    char *name, int name_len, u8 type,
				    struct btrfs_key *location)
	struct inode *inode;

	inode = read_one_inode(root, location->objectid);
	dir = read_one_inode(root, dirid);

	ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);

	/* FIXME, put inode into FIXUP list */
/*
 * take a single entry in a log directory item and replay it into
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;

	dir = read_one_inode(root, key->objectid);

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	ret = drop_one_dir_item(trans, root, path, dir, dst_di);

	if (key->type == BTRFS_DIR_INDEX_KEY)

	btrfs_release_path(path);
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, path, key->objectid, key->offset,
			      name, name_len, log_type, &log_key);
	BUG_ON(ret && ret != -ENOENT);
/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	unsigned long ptr_end;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di))
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		ptr = (unsigned long)(di + 1);
/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
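/*
 * Concrete example (added for illustration): if the log carries a dir_log
 * item for dirid X claiming authority over index offsets [10, 50], and the
 * subvolume directory still has an entry at offset 30 that is absent from the
 * log, that entry was deleted before the fsync and replay must remove it.
 * Offsets outside [10, 50] are left alone because the log makes no claim
 * about them.
 */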
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
	struct btrfs_key key;
	struct btrfs_dir_log_item *item;

	if (*start_ret == (u64)-1)

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (path->slots[0] == 0)
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		*start_ret = key.offset;
		*end_ret = found_end;

	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	btrfs_release_path(path);
/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct btrfs_key *dir_key)
	struct extent_buffer *eb;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	unsigned long ptr_end;
	struct inode *inode;
	struct btrfs_key location;

	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di)) {
		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		read_extent_buffer(eb, name, (unsigned long)(di + 1),

		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
		if (IS_ERR_OR_NULL(log_di)) {
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(path);
			btrfs_release_path(log_path);
			inode = read_one_inode(root, location.objectid);

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);
			btrfs_inc_nlink(inode);
			ret = btrfs_unlink_inode(trans, root, dir, inode,
			btrfs_run_delayed_items(trans, root);

			/* there might still be more names under this key
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
		btrfs_release_path(log_path);

		ptr = (unsigned long)(di + 1);
	btrfs_release_path(path);
	btrfs_release_path(log_path);
/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 */
		btrfs_free_path(log_path);

	range_end = (u64)-1;
		ret = find_dir_range(log, path, dirid, key_type,
				     &range_start, &range_end);

		dir_key.offset = range_start;
			ret = btrfs_search_slot(NULL, root, &dir_key, path,

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)
			if (found_key.offset > range_end)
			ret = check_item_in_log(trans, root, log, path,
			if (found_key.offset == (u64)-1)
			dir_key.offset = found_key.offset + 1;
		btrfs_release_path(path);
		if (range_end == (u64)-1)
		range_start = range_end + 1;

	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(path);
	btrfs_release_path(path);
	btrfs_free_path(log_path);
/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen)
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;

	ret = btrfs_read_buffer(eb, gen);
	level = btrfs_header_level(eb);

	path = btrfs_alloc_path();

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;

			inode_item = btrfs_item_ptr(eb, i,
						    struct btrfs_inode_item);
			mode = btrfs_inode_mode(eb, inode_item);
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
						root, log, path, key.objectid, 0);
			ret = overwrite_item(wc->trans, root, path,

			/* for regular files, make sure the corresponding
			 * orphan item exists.  extents past the new EOF
			 * will be truncated later by orphan cleanup.
			 */
			if (S_ISREG(mode)) {
				ret = insert_orphan_item(wc->trans, root,

			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);

		if (wc->stage < LOG_WALK_REPLAY_ALL)

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
		} else if (key.type == BTRFS_INODE_REF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
			BUG_ON(ret && ret != -ENOENT);
		} else if (key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
			BUG_ON(ret && ret != -ENOENT);
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
		} else if (key.type == BTRFS_DIR_ITEM_KEY ||
			   key.type == BTRFS_DIR_INDEX_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
	btrfs_free_path(path);
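/*
 * Descend one level at a time toward the leftmost unvisited block of the log
 * tree, calling wc->process_func on each block found on the way down.  When
 * the walk is freeing the tree, blocks are also cleaned and their reserved
 * extents released.  Used by walk_log_tree() below.
 * (Descriptive comment added; the function below had none.)
 */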
static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_path *path, int *level,
				       struct walk_control *wc)
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	while (*level > 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		if (btrfs_header_level(cur) != *level)

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = btrfs_level_size(root, *level - 1);

		parent = path->nodes[*level];
		root_owner = btrfs_header_owner(parent);

		next = btrfs_find_create_tree_block(root, bytenr, blocksize);

		ret = wc->process_func(root, next, wc, ptr_gen);

			path->slots[*level]++;
				ret = btrfs_read_buffer(next, ptr_gen);
					free_extent_buffer(next);

				btrfs_tree_lock(next);
				btrfs_set_lock_blocking(next);
				clean_tree_block(trans, root, next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);

				WARN_ON(root_owner !=
					BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(root,
				BUG_ON(ret); /* -ENOMEM or logic errors */
			free_extent_buffer(next);
		ret = btrfs_read_buffer(next, ptr_gen);
			free_extent_buffer(next);

		WARN_ON(*level <= 0);
		if (path->nodes[*level - 1])
			free_extent_buffer(path->nodes[*level - 1]);
		path->nodes[*level - 1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int *level,
				     struct walk_control *wc)
	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
			WARN_ON(*level == 0);
			struct extent_buffer *parent;
			if (path->nodes[*level] == root->node)
				parent = path->nodes[*level];
				parent = path->nodes[*level + 1];

			root_owner = btrfs_header_owner(parent);
			ret = wc->process_func(root, path->nodes[*level], wc,
				 btrfs_header_generation(path->nodes[*level]));
				struct extent_buffer *next;

				next = path->nodes[*level];

				btrfs_tree_lock(next);
				btrfs_set_lock_blocking(next);
				clean_tree_block(trans, root, next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);

				WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(root,
						path->nodes[*level]->start,
						path->nodes[*level]->len);
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 */
static int walk_log_tree(struct btrfs_trans_handle *trans,
			 struct btrfs_root *log, struct walk_control *wc)
	struct btrfs_path *path;

	path = btrfs_alloc_path();

	level = btrfs_header_level(log->node);
	path->nodes[level] = log->node;
	extent_buffer_get(log->node);
	path->slots[level] = 0;

		wret = walk_down_log_tree(trans, log, path, &level, wc);
		wret = walk_up_log_tree(trans, log, path, &level, wc);

	/* was the root node processed? if not, catch it here */
	if (path->nodes[orig_level]) {
		ret = wc->process_func(log, path->nodes[orig_level], wc,
			 btrfs_header_generation(path->nodes[orig_level]));
			struct extent_buffer *next;

			next = path->nodes[orig_level];

			btrfs_tree_lock(next);
			btrfs_set_lock_blocking(next);
			clean_tree_block(trans, log, next);
			btrfs_wait_tree_block_writeback(next);
			btrfs_tree_unlock(next);

			WARN_ON(log->root_key.objectid !=
				BTRFS_TREE_LOG_OBJECTID);
			ret = btrfs_free_and_pin_reserved_extent(log, next->start,
			BUG_ON(ret); /* -ENOMEM or logic errors */

	for (i = 0; i <= orig_level; i++) {
		if (path->nodes[i]) {
			free_extent_buffer(path->nodes[i]);
			path->nodes[i] = NULL;
	btrfs_free_path(path);
/*
 * helper function to update the item for a given subvolume's log root
 * in the tree of log roots
 */
static int update_log_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *log)
	if (log->log_transid == 1) {
		/* insert root item on the first sync */
		ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
					&log->root_key, &log->root_item);
		ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
					&log->root_key, &log->root_item);
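/*
 * Wait for the log commit with the given transid to finish (or for a full
 * transaction commit to be forced).  Called with root->log_mutex held; the
 * mutex is dropped while we sleep and retaken before returning.
 * (Descriptive comment added; the function below had none.)
 */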
static int wait_log_commit(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long transid)
	int index = transid % 2;

	/*
	 * we only allow two pending log transactions at a time,
	 * so we know that if ours is more than 2 older than the
	 * current transaction, we're done
	 */
		prepare_to_wait(&root->log_commit_wait[index],
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);

		if (root->fs_info->last_trans_log_full_commit !=
		    trans->transid && root->log_transid < transid + 2 &&
		    atomic_read(&root->log_commit[index]))

		finish_wait(&root->log_commit_wait[index], &wait);
		mutex_lock(&root->log_mutex);
	} while (root->fs_info->last_trans_log_full_commit !=
		 trans->transid && root->log_transid < transid + 2 &&
		 atomic_read(&root->log_commit[index]));

static void wait_for_writer(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
	while (root->fs_info->last_trans_log_full_commit !=
	       trans->transid && atomic_read(&root->log_writers)) {
		prepare_to_wait(&root->log_writer_wait,
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);
		if (root->fs_info->last_trans_log_full_commit !=
		    trans->transid && atomic_read(&root->log_writers))
		mutex_lock(&root->log_mutex);
		finish_wait(&root->log_writer_wait, &wait);
/*
 * btrfs_sync_log sends a given tree log down to the disk and
 * updates the super blocks to record it.  When this call is done,
 * you know that any inodes previously logged are safely on disk only
 *
 * Any other return value means you need to call btrfs_commit_transaction.
 * Some of the edge cases for fsyncing directories that have had unlinks
 * or renames done in the past mean that sometimes the only safe
 * fsync is to commit the whole FS.  When btrfs_sync_log returns -EAGAIN,
 * that has happened.
 */
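/*
 * Illustrative caller behaviour (sketch, not from this file): per the comment
 * above, a 0 return means the log made it to disk, while any other return is
 * expected to fall back to a full commit, roughly:
 *
 *	ret = btrfs_sync_log(trans, root);
 *	if (ret == 0)
 *		ret = btrfs_end_transaction(trans, root);
 *	else
 *		ret = btrfs_commit_transaction(trans, root);
 */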
2270 int btrfs_sync_log(struct btrfs_trans_handle
*trans
,
2271 struct btrfs_root
*root
)
2277 struct btrfs_root
*log
= root
->log_root
;
2278 struct btrfs_root
*log_root_tree
= root
->fs_info
->log_root_tree
;
2279 unsigned long log_transid
= 0;
2281 mutex_lock(&root
->log_mutex
);
2282 log_transid
= root
->log_transid
;
2283 index1
= root
->log_transid
% 2;
2284 if (atomic_read(&root
->log_commit
[index1
])) {
2285 wait_log_commit(trans
, root
, root
->log_transid
);
2286 mutex_unlock(&root
->log_mutex
);
2289 atomic_set(&root
->log_commit
[index1
], 1);
2291 /* wait for previous tree log sync to complete */
2292 if (atomic_read(&root
->log_commit
[(index1
+ 1) % 2]))
2293 wait_log_commit(trans
, root
, root
->log_transid
- 1);
2295 int batch
= atomic_read(&root
->log_batch
);
2296 /* when we're on an ssd, just kick the log commit out */
2297 if (!btrfs_test_opt(root, SSD) && root->log_multiple_pids) {
2298 mutex_unlock(&root->log_mutex);
2299 schedule_timeout_uninterruptible(1);
2300 mutex_lock(&root->log_mutex);
2302 wait_for_writer(trans, root);
2303 if (batch == atomic_read(&root->log_batch))
2307 /* bail out if we need to do a full commit */
2308 if (root->fs_info->last_trans_log_full_commit == trans->transid) {
2310 btrfs_free_logged_extents(log, log_transid);
2311 mutex_unlock(&root->log_mutex);
2315 if (log_transid % 2 == 0)
2316 mark = EXTENT_DIRTY;
2320 /* we start IO on all the marked extents here, but we don't actually
2321 * wait for them until later.
2323 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
2325 btrfs_abort_transaction(trans, root, ret);
2326 btrfs_free_logged_extents(log, log_transid);
2327 mutex_unlock(&root->log_mutex);
2331 btrfs_set_root_node(&log->root_item, log->node);
2333 root->log_transid++;
2334 log->log_transid = root->log_transid;
2335 root->log_start_pid = 0;
2338 * IO has been started, blocks of the log tree have WRITTEN flag set
2339 * in their headers. new modifications of the log will be written to
2340 * new positions. so it's safe to allow log writers to go in.
2342 mutex_unlock(&root->log_mutex);
2344 mutex_lock(&log_root_tree->log_mutex);
2345 atomic_inc(&log_root_tree->log_batch);
2346 atomic_inc(&log_root_tree->log_writers);
2347 mutex_unlock(&log_root_tree->log_mutex);
2349 ret = update_log_root(trans, log);
2351 mutex_lock(&log_root_tree->log_mutex);
2352 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
2354 if (waitqueue_active(&log_root_tree->log_writer_wait))
2355 wake_up(&log_root_tree->log_writer_wait);
2359 if (ret != -ENOSPC) {
2360 btrfs_abort_transaction(trans, root, ret);
2361 mutex_unlock(&log_root_tree->log_mutex);
2364 root->fs_info->last_trans_log_full_commit = trans->transid;
2365 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2366 btrfs_free_logged_extents(log, log_transid);
2367 mutex_unlock(&log_root_tree->log_mutex);
2372 index2 = log_root_tree->log_transid % 2;
2373 if (atomic_read(&log_root_tree->log_commit[index2])) {
2374 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2375 wait_log_commit(trans, log_root_tree,
2376 log_root_tree->log_transid);
2377 btrfs_free_logged_extents(log, log_transid);
2378 mutex_unlock(&log_root_tree->log_mutex);
2382 atomic_set(&log_root_tree->log_commit[index2], 1);
2384 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
2385 wait_log_commit(trans, log_root_tree,
2386 log_root_tree->log_transid - 1);
2389 wait_for_writer(trans, log_root_tree);
2392 * now that we've moved on to the tree of log tree roots,
2393 * check the full commit flag again
2395 if (root->fs_info->last_trans_log_full_commit == trans->transid) {
2396 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2397 btrfs_free_logged_extents(log, log_transid);
2398 mutex_unlock(&log_root_tree->log_mutex);
2400 goto out_wake_log_root;
2403 ret = btrfs_write_and_wait_marked_extents(log_root_tree,
2404 &log_root_tree->dirty_log_pages,
2405 EXTENT_DIRTY | EXTENT_NEW);
2407 btrfs_abort_transaction(trans, root, ret);
2408 btrfs_free_logged_extents(log, log_transid);
2409 mutex_unlock(&log_root_tree->log_mutex);
2410 goto out_wake_log_root;
2412 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2413 btrfs_wait_logged_extents(log, log_transid);
2415 btrfs_set_super_log_root(root->fs_info->super_for_commit,
2416 log_root_tree->node->start);
2417 btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
2418 btrfs_header_level(log_root_tree->node));
2420 log_root_tree->log_transid++;
2423 mutex_unlock(&log_root_tree->log_mutex);
2426 * nobody else is going to jump in and write the ctree
2427 * super here because the log_commit atomic below is protecting
2428 * us. We must be called with a transaction handle pinning
2429 * the running transaction open, so a full commit can't hop
2430 * in and cause problems either.
2432 btrfs_scrub_pause_super(root);
2433 ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
2434 btrfs_scrub_continue_super(root);
2436 btrfs_abort_transaction(trans, root, ret);
2437 goto out_wake_log_root;
2440 mutex_lock(&root->log_mutex);
2441 if (root->last_log_commit < log_transid)
2442 root->last_log_commit = log_transid;
2443 mutex_unlock(&root->log_mutex);
2446 atomic_set(&log_root_tree->log_commit[index2], 0);
2448 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
2449 wake_up(&log_root_tree->log_commit_wait[index2]);
2451 atomic_set(&root->log_commit[index1], 0);
2453 if (waitqueue_active(&root->log_commit_wait[index1]))
2454 wake_up(&root->log_commit_wait[index1]);
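/*
 * Illustrative sketch, not part of tree-log.c: the commit path above is double
 * buffered. Each log commit claims the slot picked by "log_transid % 2" while
 * the previous commit may still be draining the other slot. A minimal,
 * self-contained model of just that slot arithmetic; every name below is
 * hypothetical.
 */
#include <assert.h>
#include <stdint.h>

struct demo_log_state {
	uint64_t log_transid;		/* stand-in for root->log_transid */
};

/* mirrors the "index2 = log_root_tree->log_transid % 2" computation above */
static int demo_commit_slot(const struct demo_log_state *s)
{
	return (int)(s->log_transid % 2);
}

int main(void)
{
	struct demo_log_state s = { .log_transid = 7 };

	assert(demo_commit_slot(&s) == 1);	/* odd transid uses slot 1 */
	s.log_transid++;
	assert(demo_commit_slot(&s) == 0);	/* the next commit flips to slot 0 */
	return 0;
}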
2458 static void free_log_tree(struct btrfs_trans_handle *trans,
2459 struct btrfs_root *log)
2464 struct walk_control wc = {
2466 .process_func = process_one_buffer
2469 ret = walk_log_tree(trans, log, &wc);
2473 ret = find_first_extent_bit(&log->dirty_log_pages,
2474 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
2479 clear_extent_bits(&log->dirty_log_pages, start, end,
2480 EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
2484 * We may have short-circuited the log tree with the full commit logic
2485 * and left ordered extents on our list, so clear these out to keep us
2486 * from leaking inodes and memory.
2488 btrfs_free_logged_extents(log, 0);
2489 btrfs_free_logged_extents(log, 1);
2491 free_extent_buffer(log->node);
2496 * free all the extents used by the tree log. This should be called
2497 * at commit time of the full transaction
2499 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
2501 if (root->log_root) {
2502 free_log_tree(trans, root->log_root);
2503 root->log_root = NULL;
2508 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
2509 struct btrfs_fs_info *fs_info)
2511 if (fs_info->log_root_tree) {
2512 free_log_tree(trans, fs_info->log_root_tree);
2513 fs_info->log_root_tree = NULL;
2519 * If both a file and directory are logged, and unlinks or renames are
2520 * mixed in, we have a few interesting corners:
2522 * create file X in dir Y
2523 * link file X to X.link in dir Y
2525 * unlink file X but leave X.link
2528 * After a crash we would expect only X.link to exist. But file X
2529 * didn't get fsync'd again so the log has back refs for X and X.link.
2531 * We solve this by removing directory entries and inode backrefs from the
2532 * log when a file that was logged in the current transaction is
2533 * unlinked. Any later fsync will include the updated log entries, and
2534 * we'll be able to reconstruct the proper directory items from backrefs.
2536 * This optimization allows us to avoid relogging the entire inode
2537 * or the entire directory.
2539 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2540 struct btrfs_root *root,
2541 const char *name, int name_len,
2542 struct inode *dir, u64 index)
2544 struct btrfs_root *log;
2545 struct btrfs_dir_item *di;
2546 struct btrfs_path *path;
2550 u64 dir_ino = btrfs_ino(dir);
2552 if (BTRFS_I(dir)->logged_trans < trans->transid)
2555 ret = join_running_log_trans(root);
2559 mutex_lock(&BTRFS_I(dir)->log_mutex);
2561 log = root->log_root;
2562 path = btrfs_alloc_path();
2568 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
2569 name, name_len, -1);
2575 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2576 bytes_del += name_len;
2579 btrfs_release_path(path);
2580 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
2581 index, name, name_len, -1);
2587 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2588 bytes_del += name_len;
2592 /* update the directory size in the log to reflect the names
2596 struct btrfs_key key;
2598 key.objectid = dir_ino;
2600 key.type = BTRFS_INODE_ITEM_KEY;
2601 btrfs_release_path(path);
2603 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
2609 struct btrfs_inode_item *item;
2612 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2613 struct btrfs_inode_item);
2614 i_size = btrfs_inode_size(path->nodes[0], item);
2615 if (i_size > bytes_del)
2616 i_size -= bytes_del;
2619 btrfs_set_inode_size(path->nodes[0], item, i_size);
2620 btrfs_mark_buffer_dirty(path->nodes[0]);
2623 btrfs_release_path(path);
2626 btrfs_free_path(path);
2628 mutex_unlock(&BTRFS_I(dir)->log_mutex);
2629 if (ret == -ENOSPC) {
2630 root->fs_info->last_trans_log_full_commit = trans->transid;
2633 btrfs_abort_transaction(trans, root, ret);
2635 btrfs_end_log_trans(root);
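/*
 * Illustrative userspace sketch, not part of tree-log.c: the corner case
 * described before btrfs_del_dir_entries_in_log() can be produced with plain
 * syscalls. The mount point and file names below are invented for the example.
 */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/Y/X", O_CREAT | O_WRONLY, 0644);

	link("/mnt/Y/X", "/mnt/Y/X.link");	/* second name for the same inode */
	fsync(fd);				/* the log now records X and X.link */
	unlink("/mnt/Y/X");			/* no second fsync follows, so the
						 * log itself must drop the X entry */
	close(fd);
	return 0;
}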
2640 /* see comments for btrfs_del_dir_entries_in_log */
2641 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
2642 struct btrfs_root *root,
2643 const char *name, int name_len,
2644 struct inode *inode, u64 dirid)
2646 struct btrfs_root *log;
2650 if (BTRFS_I(inode)->logged_trans < trans->transid)
2653 ret = join_running_log_trans(root);
2656 log = root->log_root;
2657 mutex_lock(&BTRFS_I(inode)->log_mutex);
2659 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
2661 mutex_unlock(&BTRFS_I(inode)->log_mutex);
2662 if (ret == -ENOSPC) {
2663 root->fs_info->last_trans_log_full_commit = trans->transid;
2665 } else if (ret < 0 && ret != -ENOENT)
2666 btrfs_abort_transaction(trans, root, ret);
2667 btrfs_end_log_trans(root);
2673 * creates a range item in the log for 'dirid'. first_offset and
2674 * last_offset tell us which parts of the key space the log should
2675 * be considered authoritative for.
2677 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
2678 struct btrfs_root *log,
2679 struct btrfs_path *path,
2680 int key_type, u64 dirid,
2681 u64 first_offset, u64 last_offset)
2684 struct btrfs_key key;
2685 struct btrfs_dir_log_item *item;
2687 key.objectid = dirid;
2688 key.offset = first_offset;
2689 if (key_type == BTRFS_DIR_ITEM_KEY)
2690 key.type = BTRFS_DIR_LOG_ITEM_KEY;
2692 key.type = BTRFS_DIR_LOG_INDEX_KEY;
2693 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
2697 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2698 struct btrfs_dir_log_item);
2699 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
2700 btrfs_mark_buffer_dirty(path->nodes[0]);
2701 btrfs_release_path(path);
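/*
 * Illustrative sketch, not part of tree-log.c: a dir log item stamps the
 * window [first_offset, last_offset] of a directory's key space as
 * authoritative in the log. The helper below is hypothetical and only shows
 * the interval test that replay relies on.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct demo_dir_log_range {
	uint64_t first_offset;
	uint64_t last_offset;
};

static bool demo_log_is_authoritative(const struct demo_dir_log_range *r,
				      uint64_t offset)
{
	return offset >= r->first_offset && offset <= r->last_offset;
}

int main(void)
{
	struct demo_dir_log_range r = { .first_offset = 10, .last_offset = 20 };

	assert(demo_log_is_authoritative(&r, 15));
	assert(!demo_log_is_authoritative(&r, 25));	/* outside: log says nothing */
	return 0;
}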
2706 * log all the items included in the current transaction for a given
2707 * directory. This also creates the range items in the log tree required
2708 * to replay anything deleted before the fsync
2710 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2711 struct btrfs_root *root, struct inode *inode,
2712 struct btrfs_path *path,
2713 struct btrfs_path *dst_path, int key_type,
2714 u64 min_offset, u64 *last_offset_ret)
2716 struct btrfs_key min_key;
2717 struct btrfs_key max_key;
2718 struct btrfs_root *log = root->log_root;
2719 struct extent_buffer *src;
2724 u64 first_offset = min_offset;
2725 u64 last_offset = (u64)-1;
2726 u64 ino = btrfs_ino(inode);
2728 log = root->log_root;
2729 max_key.objectid = ino;
2730 max_key.offset = (u64)-1;
2731 max_key.type = key_type;
2733 min_key.objectid = ino;
2734 min_key.type = key_type;
2735 min_key.offset = min_offset;
2737 path->keep_locks = 1;
2739 ret = btrfs_search_forward(root, &min_key, &max_key,
2740 path, trans->transid);
2743 * we didn't find anything from this transaction, see if there
2744 * is anything at all
2746 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
2747 min_key.objectid = ino;
2748 min_key.type = key_type;
2749 min_key.offset = (u64)-1;
2750 btrfs_release_path(path);
2751 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
2753 btrfs_release_path(path);
2756 ret = btrfs_previous_item(root, path, ino, key_type);
2758 /* if ret == 0 there are items for this type,
2759 * create a range to tell us the last key of this type.
2760 * otherwise, there are no items in this directory after
2761 * *min_offset, and we create a range to indicate that.
2764 struct btrfs_key tmp;
2765 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
2767 if (key_type == tmp.type)
2768 first_offset = max(min_offset, tmp.offset) + 1;
2773 /* go backward to find any previous key */
2774 ret = btrfs_previous_item(root, path, ino, key_type);
2776 struct btrfs_key tmp;
2777 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
2778 if (key_type == tmp.type) {
2779 first_offset = tmp.offset;
2780 ret = overwrite_item(trans, log, dst_path,
2781 path->nodes[0], path->slots[0],
2789 btrfs_release_path(path);
2791 /* find the first key from this transaction again */
2792 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
2799 * we have a block from this transaction, log every item in it
2800 * from our directory
2803 struct btrfs_key tmp;
2804 src = path->nodes[0];
2805 nritems = btrfs_header_nritems(src);
2806 for (i = path->slots[0]; i < nritems; i++) {
2807 btrfs_item_key_to_cpu(src, &min_key, i);
2809 if (min_key.objectid != ino || min_key.type != key_type)
2811 ret = overwrite_item(trans, log, dst_path, src, i,
2818 path->slots[0] = nritems;
2821 * look ahead to the next item and see if it is also
2822 * from this directory and from this transaction
2824 ret = btrfs_next_leaf(root, path);
2826 last_offset = (u64)-1;
2829 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
2830 if (tmp.objectid != ino || tmp.type != key_type) {
2831 last_offset = (u64)-1;
2834 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
2835 ret = overwrite_item(trans, log, dst_path,
2836 path->nodes[0], path->slots[0],
2841 last_offset = tmp.offset;
2846 btrfs_release_path(path);
2847 btrfs_release_path(dst_path);
2850 *last_offset_ret = last_offset;
2852 * insert the log range keys to indicate where the log
2855 ret = insert_dir_log_key(trans, log, path, key_type,
2856 ino, first_offset, last_offset);
2864 * logging directories is very similar to logging inodes. We find all the items
2865 * from the current transaction and write them to the log.
2867 * The recovery code scans the directory in the subvolume, and if it finds a
2868 * key in the range logged that is not present in the log tree, then it means
2869 * that dir entry was unlinked during the transaction.
2871 * In order for that scan to work, we must include one key smaller than
2872 * the smallest logged by this transaction and one key larger than the largest
2873 * key logged by this transaction.
2875 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
2876 struct btrfs_root *root, struct inode *inode,
2877 struct btrfs_path *path,
2878 struct btrfs_path *dst_path)
2883 int key_type = BTRFS_DIR_ITEM_KEY;
2889 ret = log_dir_items(trans, root, inode, path,
2890 dst_path, key_type, min_key,
2894 if (max_key == (u64)-1)
2896 min_key = max_key + 1;
2899 if (key_type == BTRFS_DIR_ITEM_KEY) {
2900 key_type = BTRFS_DIR_INDEX_KEY;
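/*
 * Illustrative sketch, not part of tree-log.c: during replay, a directory
 * entry found in the subvolume is deleted only when the log was authoritative
 * for its offset yet contains no matching item. A hypothetical decision
 * helper, only to make that rule concrete:
 */
#include <assert.h>
#include <stdbool.h>

static bool demo_entry_was_unlinked(bool log_authoritative_for_offset,
				    bool entry_present_in_log)
{
	return log_authoritative_for_offset && !entry_present_in_log;
}

int main(void)
{
	assert(demo_entry_was_unlinked(true, false));	/* logged range, missing item */
	assert(!demo_entry_was_unlinked(false, false));	/* log never covered it */
	return 0;
}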
2907 * a helper function to drop items from the log before we relog an
2908 * inode. max_key_type indicates the highest item type to remove.
2909 * This cannot be run for file data extents because it does not
2910 * free the extents they point to.
2912 static int drop_objectid_items(struct btrfs_trans_handle *trans,
2913 struct btrfs_root *log,
2914 struct btrfs_path *path,
2915 u64 objectid, int max_key_type)
2918 struct btrfs_key key;
2919 struct btrfs_key found_key;
2922 key.objectid = objectid;
2923 key.type = max_key_type;
2924 key.offset = (u64)-1;
2927 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
2932 if (path->slots[0] == 0)
2936 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2939 if (found_key.objectid != objectid)
2942 found_key.offset = 0;
2944 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
2947 ret = btrfs_del_items(trans, log, path, start_slot,
2948 path->slots[0] - start_slot + 1);
2950 * If start slot isn't 0 then we don't need to re-search, we've
2951 * found the last guy with the objectid in this tree.
2953 if (ret || start_slot != 0)
2955 btrfs_release_path(path);
2957 btrfs_release_path(path);
2963 static void fill_inode_item(struct btrfs_trans_handle *trans,
2964 struct extent_buffer *leaf,
2965 struct btrfs_inode_item *item,
2966 struct inode *inode, int log_inode_only)
2968 struct btrfs_map_token token;
2970 btrfs_init_map_token(&token);
2972 if (log_inode_only) {
2973 /* set the generation to zero so the recover code
2974 * can tell the difference between a logging
2975 * just to say 'this inode exists' and a logging
2976 * to say 'update this inode with these values'
2978 btrfs_set_token_inode_generation(leaf, item, 0, &token);
2979 btrfs_set_token_inode_size(leaf, item, 0, &token);
2981 btrfs_set_token_inode_generation(leaf, item,
2982 BTRFS_I(inode)->generation,
2984 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
2987 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
2988 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
2989 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
2990 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
2992 btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
2993 inode->i_atime.tv_sec, &token);
2994 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
2995 inode->i_atime.tv_nsec, &token);
2997 btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
2998 inode->i_mtime.tv_sec, &token);
2999 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
3000 inode->i_mtime.tv_nsec, &token);
3002 btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
3003 inode->i_ctime.tv_sec, &token);
3004 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
3005 inode->i_ctime.tv_nsec, &token);
3007 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3010 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3011 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3012 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3013 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3014 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
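/*
 * Illustrative sketch, not part of tree-log.c: per the comment in
 * fill_inode_item() above, a generation of zero marks an inode item that only
 * proves the inode exists, while a non-zero generation carries real values to
 * apply. A hypothetical reader-side check:
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool demo_item_is_exists_only(uint64_t logged_generation)
{
	return logged_generation == 0;
}

int main(void)
{
	assert(demo_item_is_exists_only(0));	/* "this inode exists" style item */
	assert(!demo_item_is_exists_only(42));	/* full "update this inode" item */
	return 0;
}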
3017 static int log_inode_item(struct btrfs_trans_handle *trans,
3018 struct btrfs_root *log, struct btrfs_path *path,
3019 struct inode *inode)
3021 struct btrfs_inode_item *inode_item;
3022 struct btrfs_key key;
3025 memcpy(&key, &BTRFS_I(inode)->location, sizeof(key));
3026 ret = btrfs_insert_empty_item(trans, log, path, &key,
3027 sizeof(*inode_item));
3028 if (ret && ret != -EEXIST)
3030 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3031 struct btrfs_inode_item);
3032 fill_inode_item(trans, path->nodes[0], inode_item, inode, 0);
3033 btrfs_release_path(path);
3037 static noinline int copy_items(struct btrfs_trans_handle *trans,
3038 struct inode *inode,
3039 struct btrfs_path *dst_path,
3040 struct extent_buffer *src,
3041 int start_slot, int nr, int inode_only)
3043 unsigned long src_offset;
3044 unsigned long dst_offset;
3045 struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
3046 struct btrfs_file_extent_item *extent;
3047 struct btrfs_inode_item *inode_item;
3049 struct btrfs_key *ins_keys;
3053 struct list_head ordered_sums;
3054 int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3056 INIT_LIST_HEAD(&ordered_sums);
3058 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3059 nr * sizeof(u32), GFP_NOFS);
3063 ins_sizes = (u32 *)ins_data;
3064 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3066 for (i = 0; i < nr; i++) {
3067 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3068 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3070 ret = btrfs_insert_empty_items(trans, log, dst_path,
3071 ins_keys, ins_sizes, nr);
3077 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3078 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3079 dst_path->slots[0]);
3081 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3083 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3084 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3086 struct btrfs_inode_item);
3087 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3088 inode, inode_only == LOG_INODE_EXISTS);
3090 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3091 src_offset, ins_sizes[i]);
3094 /* take a reference on file data extents so that truncates
3095 * or deletes of this inode don't have to relog the inode
3098 if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY &&
3101 extent = btrfs_item_ptr(src, start_slot + i,
3102 struct btrfs_file_extent_item);
3104 if (btrfs_file_extent_generation(src, extent) < trans->transid)
3107 found_type = btrfs_file_extent_type(src, extent);
3108 if (found_type == BTRFS_FILE_EXTENT_REG) {
3110 ds = btrfs_file_extent_disk_bytenr(src,
3112 /* ds == 0 is a hole */
3116 dl = btrfs_file_extent_disk_num_bytes(src,
3118 cs = btrfs_file_extent_offset(src, extent);
3119 cl = btrfs_file_extent_num_bytes(src,
3121 if (btrfs_file_extent_compression(src,
3127 ret = btrfs_lookup_csums_range(
3128 log->fs_info->csum_root,
3129 ds + cs, ds + cs + cl - 1,
3136 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
3137 btrfs_release_path(dst_path);
3141 * we have to do this after the loop above to avoid changing the
3142 * log tree while trying to change the log tree.
3145 while (!list_empty(&ordered_sums)) {
3146 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3147 struct btrfs_ordered_sum,
3150 ret = btrfs_csum_file_blocks(trans, log, sums);
3151 list_del(&sums->list);
3157 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
3159 struct extent_map *em1, *em2;
3161 em1 = list_entry(a, struct extent_map, list);
3162 em2 = list_entry(b, struct extent_map, list);
3164 if (em1->start < em2->start)
3166 else if (em1->start > em2->start)
3171 static int drop_adjacent_extents(struct btrfs_trans_handle *trans,
3172 struct btrfs_root *root, struct inode *inode,
3173 struct extent_map *em,
3174 struct btrfs_path *path)
3176 struct btrfs_file_extent_item *fi;
3177 struct extent_buffer *leaf;
3178 struct btrfs_key key, new_key;
3179 struct btrfs_map_token token;
3181 u64 extent_offset = 0;
3188 btrfs_init_map_token(&token);
3189 leaf = path->nodes[0];
3191 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3193 ret = btrfs_del_items(trans, root, path,
3200 ret = btrfs_next_leaf_write(trans, root, path, 1);
3205 leaf = path->nodes[0];
3208 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3209 if (key.objectid != btrfs_ino(inode) ||
3210 key.type != BTRFS_EXTENT_DATA_KEY ||
3211 key.offset >= em->start + em->len)
3214 fi = btrfs_item_ptr(leaf, path->slots[0],
3215 struct btrfs_file_extent_item);
3216 extent_type = btrfs_token_file_extent_type(leaf, fi, &token);
3217 if (extent_type == BTRFS_FILE_EXTENT_REG ||
3218 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
3219 extent_offset = btrfs_token_file_extent_offset(leaf,
3221 extent_end = key.offset +
3222 btrfs_token_file_extent_num_bytes(leaf, fi,
3224 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3225 extent_end = key.offset +
3226 btrfs_file_extent_inline_len(leaf, fi);
3231 if (extent_end <= em->len + em->start) {
3233 del_slot = path->slots[0];
3240 * Ok so we'll ignore previous items if we log a new extent,
3241 * which can lead to overlapping extents, so if we have an
3242 * existing extent we want to adjust we _have_ to check the next
3243 * guy to make sure we even need this extent anymore, this keeps
3244 * us from panicking in set_item_key_safe.
3246 if (path->slots[0] < btrfs_header_nritems(leaf) - 1) {
3247 struct btrfs_key tmp_key;
3249 btrfs_item_key_to_cpu(leaf, &tmp_key,
3250 path->slots[0] + 1);
3251 if (tmp_key.objectid == btrfs_ino(inode) &&
3252 tmp_key.type == BTRFS_EXTENT_DATA_KEY &&
3253 tmp_key.offset <= em->start + em->len) {
3255 del_slot = path->slots[0];
3261 BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
3262 memcpy(&new_key, &key, sizeof(new_key));
3263 new_key.offset = em->start + em->len;
3264 btrfs_set_item_key_safe(trans, root, path, &new_key);
3265 extent_offset += em->start + em->len - key.offset;
3266 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset,
3268 btrfs_set_token_file_extent_num_bytes(leaf, fi, extent_end -
3269 (em->start + em->len),
3271 btrfs_mark_buffer_dirty(leaf);
3275 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
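/*
 * Illustrative sketch, not part of tree-log.c: the tail adjustment above is
 * plain interval arithmetic. The surviving item is slid forward so it starts
 * where the freshly logged extent [start, start + len) ends. The struct below
 * is a hypothetical flattening of the real file extent item.
 */
#include <assert.h>
#include <stdint.h>

struct demo_file_extent {
	uint64_t key_offset;	/* file offset the item starts at */
	uint64_t extent_offset;	/* offset into the on-disk extent */
	uint64_t num_bytes;	/* file bytes the item still covers */
};

static void demo_trim_tail(struct demo_file_extent *old, uint64_t start, uint64_t len)
{
	uint64_t new_start = start + len;
	uint64_t old_end = old->key_offset + old->num_bytes;

	old->extent_offset += new_start - old->key_offset;
	old->num_bytes = old_end - new_start;
	old->key_offset = new_start;
}

int main(void)
{
	/* the old item covered [0, 16384); the new extent rewrote [0, 4096) */
	struct demo_file_extent e = { 0, 0, 16384 };

	demo_trim_tail(&e, 0, 4096);
	assert(e.key_offset == 4096 && e.extent_offset == 4096 && e.num_bytes == 12288);
	return 0;
}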
3280 static int log_one_extent(struct btrfs_trans_handle *trans,
3281 struct inode *inode, struct btrfs_root *root,
3282 struct extent_map *em, struct btrfs_path *path)
3284 struct btrfs_root *log = root->log_root;
3285 struct btrfs_file_extent_item *fi;
3286 struct extent_buffer *leaf;
3287 struct btrfs_ordered_extent *ordered;
3288 struct list_head ordered_sums;
3289 struct btrfs_map_token token;
3290 struct btrfs_key key;
3291 u64 mod_start = em->mod_start;
3292 u64 mod_len = em->mod_len;
3295 u64 extent_offset = em->start - em->orig_start;
3298 int index = log->log_transid % 2;
3299 bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3301 INIT_LIST_HEAD(&ordered_sums);
3302 btrfs_init_map_token(&token);
3303 key.objectid = btrfs_ino(inode);
3304 key.type = BTRFS_EXTENT_DATA_KEY;
3305 key.offset = em->start;
3306 path->really_keep_locks = 1;
3308 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*fi));
3309 if (ret && ret != -EEXIST) {
3310 path->really_keep_locks = 0;
3313 leaf = path->nodes[0];
3314 fi = btrfs_item_ptr(leaf, path->slots[0],
3315 struct btrfs_file_extent_item);
3316 btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
3318 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3320 btrfs_set_token_file_extent_type(leaf, fi,
3321 BTRFS_FILE_EXTENT_PREALLOC,
3324 btrfs_set_token_file_extent_type(leaf, fi,
3325 BTRFS_FILE_EXTENT_REG,
3327 if (em->block_start == 0)
3331 block_len = max(em->block_len, em->orig_block_len);
3332 if (em->compress_type != BTRFS_COMPRESS_NONE) {
3333 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
3336 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
3338 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
3339 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
3341 extent_offset, &token);
3342 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
3345 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
3346 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
3350 btrfs_set_token_file_extent_offset(leaf, fi,
3351 em->start - em->orig_start,
3353 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
3354 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->len, &token);
3355 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
3357 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
3358 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
3359 btrfs_mark_buffer_dirty(leaf);
3362 * Have to check the extent to the right of us to make sure it doesn't
3363 * fall in our current range. We're ok if the previous extent is in our
3364 * range since the recovery stuff will run us in key order and thus just
3365 * drop the part we overwrote.
3367 ret = drop_adjacent_extents(trans, log, inode, em, path);
3368 btrfs_release_path(path);
3369 path->really_keep_locks = 0;
3377 if (em->compress_type) {
3379 csum_len = block_len;
3383 * First check and see if our csums are on our outstanding ordered
3387 spin_lock_irq(&log->log_extents_lock[index]);
3388 list_for_each_entry(ordered, &log->logged_list[index], log_list) {
3389 struct btrfs_ordered_sum *sum;
3394 if (ordered->inode != inode)
3397 if (ordered->file_offset + ordered->len <= mod_start ||
3398 mod_start + mod_len <= ordered->file_offset)
3402 * We are going to copy all the csums on this ordered extent, so
3403 * go ahead and adjust mod_start and mod_len in case this
3404 * ordered extent has already been logged.
3406 if (ordered->file_offset > mod_start) {
3407 if (ordered->file_offset + ordered->len >=
3408 mod_start + mod_len)
3409 mod_len = ordered->file_offset - mod_start;
3411 * If we have this case
3413 * |--------- logged extent ---------|
3414 * |----- ordered extent ----|
3416 * Just don't mess with mod_start and mod_len, we'll
3417 * just end up logging more csums than we need and it
3421 if (ordered->file_offset + ordered->len <
3422 mod_start + mod_len) {
3423 mod_len = (mod_start + mod_len) -
3424 (ordered->file_offset + ordered->len);
3425 mod_start = ordered->file_offset +
3433 * To keep us from looping for the above case of an ordered
3434 * extent that falls inside of the logged extent.
3436 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
3439 atomic_inc(&ordered->refs);
3440 spin_unlock_irq(&log->log_extents_lock[index]);
3442 * we've dropped the lock, we must either break or
3443 * start over after this.
3446 wait_event(ordered->wait, ordered->csum_bytes_left == 0);
3448 list_for_each_entry(sum, &ordered->list, list) {
3449 ret = btrfs_csum_file_blocks(trans, log, sum);
3451 btrfs_put_ordered_extent(ordered);
3455 btrfs_put_ordered_extent(ordered);
3459 spin_unlock_irq(&log->log_extents_lock[index]);
3462 if (!mod_len || ret)
3465 csum_offset = mod_start - em->start;
3468 /* block start is already adjusted for the file extent offset. */
3469 ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
3470 em->block_start + csum_offset,
3471 em->block_start + csum_offset +
3472 csum_len - 1, &ordered_sums, 0);
3476 while (!list_empty(&ordered_sums)) {
3477 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3478 struct btrfs_ordered_sum,
3481 ret = btrfs_csum_file_blocks(trans, log, sums);
3482 list_del(&sums->list);
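/*
 * Illustrative sketch, not part of tree-log.c: the mod_start/mod_len
 * adjustments in log_one_extent() shave off whatever part of the modified
 * range an overlapping ordered extent already covers, and deliberately leave
 * the "ordered extent strictly inside the logged extent" case alone. A
 * self-contained model with hypothetical names:
 */
#include <assert.h>
#include <stdint.h>

static void demo_trim_mod_range(uint64_t *mod_start, uint64_t *mod_len,
				uint64_t o_start, uint64_t o_len)
{
	uint64_t mod_end = *mod_start + *mod_len;
	uint64_t o_end = o_start + o_len;

	if (o_end <= *mod_start || mod_end <= o_start)
		return;					/* no overlap at all */
	if (o_start > *mod_start) {
		if (o_end >= mod_end)
			*mod_len = o_start - *mod_start;	/* tail covered */
		/* else: ordered extent sits wholly inside, nothing to trim */
	} else if (o_end < mod_end) {
		*mod_len = mod_end - o_end;			/* head covered */
		*mod_start = o_end;
	} else {
		*mod_len = 0;					/* fully covered */
	}
}

int main(void)
{
	uint64_t start = 0, len = 8192;

	demo_trim_mod_range(&start, &len, 0, 4096);	/* head already covered */
	assert(start == 4096 && len == 4096);
	return 0;
}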
3489 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
3490 struct btrfs_root *root,
3491 struct inode *inode,
3492 struct btrfs_path *path)
3494 struct extent_map *em, *n;
3495 struct list_head extents;
3496 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
3501 INIT_LIST_HEAD(&extents);
3503 write_lock(&tree->lock);
3504 test_gen = root->fs_info->last_trans_committed;
3506 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
3507 list_del_init(&em->list);
3510 * Just an arbitrary number, this can be really CPU intensive
3511 * once we start getting a lot of extents, and really once we
3512 * have a bunch of extents we just want to commit since it will
3515 if (++num > 32768) {
3516 list_del_init(&tree->modified_extents);
3521 if (em->generation <= test_gen)
3523 /* Need a ref to keep it from getting evicted from cache */
3524 atomic_inc(&em->refs);
3525 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
3526 list_add_tail(&em->list, &extents);
3530 list_sort(NULL, &extents, extent_cmp);
3533 while (!list_empty(&extents)) {
3534 em = list_entry(extents.next, struct extent_map, list);
3536 list_del_init(&em->list);
3539 * If we had an error we just need to delete everybody from our
3543 clear_em_logging(tree, em);
3544 free_extent_map(em);
3548 write_unlock(&tree->lock);
3550 ret = log_one_extent(trans, inode, root, em, path);
3551 write_lock(&tree->lock);
3552 clear_em_logging(tree, em);
3553 free_extent_map(em);
3555 WARN_ON(!list_empty(&extents));
3556 write_unlock(&tree->lock);
3558 btrfs_release_path(path);
3562 /* log a single inode in the tree log.
3563 * At least one parent directory for this inode must exist in the tree
3564 * or be logged already.
3566 * Any items from this inode changed by the current transaction are copied
3567 * to the log tree. An extra reference is taken on any extents in this
3568 * file, allowing us to avoid a whole pile of corner cases around logging
3569 * blocks that have been removed from the tree.
3571 * See LOG_INODE_ALL and related defines for a description of what inode_only
3574 * This handles both files and directories.
3576 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
3577 struct btrfs_root *root, struct inode *inode,
3580 struct btrfs_path *path;
3581 struct btrfs_path *dst_path;
3582 struct btrfs_key min_key;
3583 struct btrfs_key max_key;
3584 struct btrfs_root *log = root->log_root;
3585 struct extent_buffer *src = NULL;
3589 int ins_start_slot = 0;
3591 bool fast_search = false;
3592 u64 ino = btrfs_ino(inode);
3594 log = root->log_root;
3596 path = btrfs_alloc_path();
3599 dst_path = btrfs_alloc_path();
3601 btrfs_free_path(path);
3605 min_key.objectid = ino;
3606 min_key.type = BTRFS_INODE_ITEM_KEY;
3609 max_key.objectid = ino;
3612 /* today the code can only do partial logging of directories */
3613 if (S_ISDIR(inode->i_mode) ||
3614 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3615 &BTRFS_I(inode)->runtime_flags) &&
3616 inode_only == LOG_INODE_EXISTS))
3617 max_key.type = BTRFS_XATTR_ITEM_KEY;
3619 max_key.type = (u8)-1;
3620 max_key.offset = (u64)-1;
3622 /* Only run delayed items if we are a dir or a new file */
3623 if (S_ISDIR(inode->i_mode) ||
3624 BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) {
3625 ret = btrfs_commit_inode_delayed_items(trans, inode);
3627 btrfs_free_path(path);
3628 btrfs_free_path(dst_path);
3633 mutex_lock(&BTRFS_I(inode)->log_mutex);
3635 btrfs_get_logged_extents(log, inode);
3638 * a brute force approach to making sure we get the most uptodate
3639 * copies of everything.
3641 if (S_ISDIR(inode->i_mode)) {
3642 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
3644 if (inode_only == LOG_INODE_EXISTS)
3645 max_key_type = BTRFS_XATTR_ITEM_KEY;
3646 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
3648 if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3649 &BTRFS_I(inode)->runtime_flags)) {
3650 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
3651 &BTRFS_I(inode)->runtime_flags);
3652 ret = btrfs_truncate_inode_items(trans, log,
3654 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
3655 &BTRFS_I(inode)->runtime_flags)) {
3656 if (inode_only == LOG_INODE_ALL)
3658 max_key.type = BTRFS_XATTR_ITEM_KEY;
3659 ret = drop_objectid_items(trans, log, path, ino,
3662 if (inode_only == LOG_INODE_ALL)
3664 ret = log_inode_item(trans, log, dst_path, inode);
3677 path->keep_locks = 1;
3681 ret = btrfs_search_forward(root, &min_key, &max_key,
3682 path, trans->transid);
3686 /* note, ins_nr might be > 0 here, cleanup outside the loop */
3687 if (min_key.objectid != ino)
3689 if (min_key.type > max_key.type)
3692 src = path->nodes[0];
3693 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
3696 } else if (!ins_nr) {
3697 ins_start_slot = path->slots[0];
3702 ret = copy_items(trans, inode, dst_path, src, ins_start_slot,
3703 ins_nr, inode_only);
3709 ins_start_slot = path->slots[0];
3712 nritems = btrfs_header_nritems(path->nodes[0]);
3714 if (path->slots[0] < nritems) {
3715 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
3720 ret = copy_items(trans, inode, dst_path, src,
3722 ins_nr, inode_only);
3729 btrfs_release_path(path);
3731 if (min_key.offset < (u64)-1)
3733 else if (min_key.type < (u8)-1)
3735 else if (min_key.objectid < (u64)-1)
3741 ret = copy_items(trans, inode, dst_path, src, ins_start_slot,
3742 ins_nr, inode_only);
3752 btrfs_release_path(dst_path);
3753 ret = btrfs_log_changed_extents(trans, root, inode, dst_path);
3759 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
3760 struct extent_map *em, *n;
3762 write_lock(&tree->lock);
3763 list_for_each_entry_safe(em, n, &tree->modified_extents, list)
3764 list_del_init(&em->list);
3765 write_unlock(&tree->lock);
3768 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
3769 btrfs_release_path(path);
3770 btrfs_release_path(dst_path);
3771 ret = log_directory_changes(trans, root, inode, path, dst_path);
3777 BTRFS_I(inode)->logged_trans = trans->transid;
3778 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
3781 btrfs_free_logged_extents(log, log->log_transid);
3782 mutex_unlock(&BTRFS_I(inode)->log_mutex);
3784 btrfs_free_path(path);
3785 btrfs_free_path(dst_path);
3790 * follow the dentry parent pointers up the chain and see if any
3791 * of the directories in it require a full commit before they can
3792 * be logged. Returns zero if nothing special needs to be done or 1 if
3793 * a full commit is required.
3795 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
3796 struct inode *inode,
3797 struct dentry *parent,
3798 struct super_block *sb,
3802 struct btrfs_root *root;
3803 struct dentry *old_parent = NULL;
3806 * for a regular file, if its inode is already on disk, we don't
3807 * have to worry about the parents at all. This is because
3808 * we can use the last_unlink_trans field to record renames
3809 * and other fun in this file.
3811 if (S_ISREG(inode->i_mode) &&
3812 BTRFS_I(inode)->generation <= last_committed &&
3813 BTRFS_I(inode)->last_unlink_trans <= last_committed)
3816 if (!S_ISDIR(inode->i_mode)) {
3817 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
3819 inode = parent->d_inode;
3823 BTRFS_I(inode)->logged_trans = trans->transid;
3826 if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
3827 root = BTRFS_I(inode)->root;
3830 * make sure any commits to the log are forced
3831 * to be full commits
3833 root->fs_info->last_trans_log_full_commit =
3839 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
3842 if (IS_ROOT(parent))
3845 parent = dget_parent(parent);
3847 old_parent = parent;
3848 inode = parent->d_inode;
3857 * helper function around btrfs_log_inode to make sure newly created
3858 * parent directories also end up in the log. A minimal inode and backref
3859 * only logging is done of any parent directories that are older than
3860 * the last committed transaction
3862 int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
3863 struct btrfs_root *root, struct inode *inode,
3864 struct dentry *parent, int exists_only)
3866 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
3867 struct super_block *sb;
3868 struct dentry *old_parent = NULL;
3870 u64 last_committed = root->fs_info->last_trans_committed;
3874 if (btrfs_test_opt(root, NOTREELOG)) {
3879 if (root->fs_info->last_trans_log_full_commit >
3880 root->fs_info->last_trans_committed) {
3885 if (root != BTRFS_I(inode)->root ||
3886 btrfs_root_refs(&root->root_item) == 0) {
3891 ret = check_parent_dirs_for_sync(trans, inode, parent,
3892 sb, last_committed);
3896 if (btrfs_inode_in_log(inode, trans->transid)) {
3897 ret = BTRFS_NO_LOG_SYNC;
3901 ret = start_log_trans(trans, root);
3905 ret = btrfs_log_inode(trans, root, inode, inode_only);
3910 * for a regular file, if its inode is already on disk, we don't
3911 * have to worry about the parents at all. This is because
3912 * we can use the last_unlink_trans field to record renames
3913 * and other fun in this file.
3915 if (S_ISREG(inode->i_mode) &&
3916 BTRFS_I(inode)->generation <= last_committed &&
3917 BTRFS_I(inode)->last_unlink_trans <= last_committed) {
3922 inode_only = LOG_INODE_EXISTS;
3924 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
3927 inode = parent->d_inode;
3928 if (root != BTRFS_I(inode)->root)
3931 if (BTRFS_I(inode)->generation >
3932 root->fs_info->last_trans_committed) {
3933 ret = btrfs_log_inode(trans, root, inode, inode_only);
3937 if (IS_ROOT(parent))
3940 parent = dget_parent(parent);
3942 old_parent = parent;
3948 root->fs_info->last_trans_log_full_commit = trans->transid;
3951 btrfs_end_log_trans(root);
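/*
 * Illustrative userspace sketch, not part of tree-log.c:
 * btrfs_log_inode_parent() exists for cases like the one below, where the
 * fsync'd file lives in a directory that is itself new in the current
 * transaction. The paths are invented for the example.
 */
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int fd;

	mkdir("/mnt/newdir", 0755);	/* parent created in this transaction */
	fd = open("/mnt/newdir/file", O_CREAT | O_WRONLY, 0644);
	write(fd, "x", 1);
	fsync(fd);	/* must also pull the new parent directory into the log,
			 * or the file has no reachable name after a crash */
	close(fd);
	return 0;
}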
3957 * it is not safe to log dentry if the chunk root has added new
3958 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
3959 * If this returns 1, you must commit the transaction to safely get your
3962 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
3963 struct btrfs_root *root, struct dentry *dentry)
3965 struct dentry *parent = dget_parent(dentry);
3968 ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent, 0);
3975 * should be called during mount to recover and replay any log trees
3978 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
3981 struct btrfs_path *path;
3982 struct btrfs_trans_handle *trans;
3983 struct btrfs_key key;
3984 struct btrfs_key found_key;
3985 struct btrfs_key tmp_key;
3986 struct btrfs_root *log;
3987 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
3988 struct walk_control wc = {
3989 .process_func = process_one_buffer,
3993 path = btrfs_alloc_path();
3997 fs_info->log_root_recovering = 1;
3999 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4000 if (IS_ERR(trans)) {
4001 ret = PTR_ERR(trans);
4008 ret = walk_log_tree(trans, log_root_tree, &wc);
4010 btrfs_error(fs_info, ret, "Failed to pin buffers while "
4011 "recovering log root tree.");
4016 key.objectid = BTRFS_TREE_LOG_OBJECTID;
4017 key.offset = (u64)-1;
4018 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
4021 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
4024 btrfs_error(fs_info, ret,
4025 "Couldn't find tree log root.");
4029 if (path->slots[0] == 0)
4033 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
4035 btrfs_release_path(path);
4036 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
4039 log = btrfs_read_fs_root_no_radix(log_root_tree,
4043 btrfs_error(fs_info, ret,
4044 "Couldn't read tree log root.");
4048 tmp_key.objectid = found_key.offset;
4049 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
4050 tmp_key.offset = (u64)-1;
4052 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
4053 if (IS_ERR(wc.replay_dest)) {
4054 ret = PTR_ERR(wc.replay_dest);
4055 btrfs_error(fs_info, ret, "Couldn't read target root "
4056 "for tree log recovery.");
4060 wc.replay_dest->log_root = log;
4061 btrfs_record_root_in_trans(trans, wc.replay_dest);
4062 ret = walk_log_tree(trans, log, &wc);
4065 if (wc.stage == LOG_WALK_REPLAY_ALL) {
4066 ret = fixup_inode_link_counts(trans, wc.replay_dest,
4071 key.offset = found_key.offset - 1;
4072 wc.replay_dest->log_root = NULL;
4073 free_extent_buffer(log->node);
4074 free_extent_buffer(log->commit_root);
4077 if (found_key.offset == 0)
4080 btrfs_release_path(path);
4082 /* step one is to pin it all, step two is to replay just inodes */
4085 wc.process_func = replay_one_buffer;
4086 wc.stage = LOG_WALK_REPLAY_INODES;
4089 /* step three is to replay everything */
4090 if (wc.stage < LOG_WALK_REPLAY_ALL) {
4095 btrfs_free_path(path);
4097 free_extent_buffer(log_root_tree->node);
4098 log_root_tree->log_root = NULL;
4099 fs_info->log_root_recovering = 0;
4101 /* step 4: commit the transaction, which also unpins the blocks */
4102 btrfs_commit_transaction(trans, fs_info->tree_root);
4104 kfree(log_root_tree);
4108 btrfs_free_path(path);
4113 * there are some corner cases where we want to force a full
4114 * commit instead of allowing a directory to be logged.
4116 * They revolve around files that were unlinked from the directory, and
4117 * this function updates the parent directory so that a full commit is
4118 * properly done if it is fsync'd later after the unlinks are done.
4120 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
4121 struct inode *dir, struct inode *inode,
4125 * when we're logging a file, if it hasn't been renamed
4126 * or unlinked, and its inode is fully committed on disk,
4127 * we don't have to worry about walking up the directory chain
4128 * to log its parents.
4130 * So, we use the last_unlink_trans field to put this transid
4131 * into the file. When the file is logged we check it and
4132 * don't log the parents if the file is fully on disk.
4134 if (S_ISREG(inode->i_mode))
4135 BTRFS_I(inode)->last_unlink_trans = trans->transid;
4138 * if this directory was already logged any new
4139 * names for this file/dir will get recorded
4142 if (BTRFS_I(dir)->logged_trans == trans->transid)
4146 * if the inode we're about to unlink was logged,
4147 * the log will be properly updated for any new names
4149 if (BTRFS_I(inode)->logged_trans == trans->transid)
4153 * when renaming files across directories, if the directory
4154 * we're unlinking from gets fsync'd later on, there's
4155 * no way to find the destination directory later and fsync it
4156 * properly. So, we have to be conservative and force commits
4157 * so the new name gets discovered.
4162 /* we can safely do the unlink without any special recording */
4166 BTRFS_I(dir)->last_unlink_trans = trans->transid;
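/*
 * Illustrative userspace sketch, not part of tree-log.c: the transid recorded
 * above matters for a sequence like this one, where the directory is fsync'd
 * after an unlink. Paths are invented for the example.
 */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int dirfd = open("/mnt/dir", O_RDONLY | O_DIRECTORY);

	unlink("/mnt/dir/victim");	/* bumps the directory's last_unlink_trans */
	fsync(dirfd);			/* must now force a full commit instead of
					 * trusting the directory's log items alone */
	close(dirfd);
	return 0;
}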
4170 * Call this after adding a new name for a file and it will properly
4171 * update the log to reflect the new name.
4173 * It will return zero if all goes well, and it will return 1 if a
4174 * full transaction commit is required.
4176 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
4177 struct inode *inode, struct inode *old_dir,
4178 struct dentry *parent)
4180 struct btrfs_root *root = BTRFS_I(inode)->root;
4183 * this will force the logging code to walk the dentry chain
4186 if (S_ISREG(inode->i_mode))
4187 BTRFS_I(inode)->last_unlink_trans = trans->transid;
4190 * if this inode hasn't been logged and the directory we're renaming it
4191 * from hasn't been logged, we don't need to log it
4193 if (BTRFS_I(inode)->logged_trans <=
4194 root->fs_info->last_trans_committed &&
4195 (!old_dir || BTRFS_I(old_dir)->logged_trans <=
4196 root->fs_info->last_trans_committed))
4199 return btrfs_log_inode_parent(trans, root, inode, parent, 1);