/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include "print-tree.h"
/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  With the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */
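/*
 * A minimal sketch of how case 1) above can be acted on at fsync time: if
 * the directory remembers the transid of the last unlink/rename done inside
 * it, and that transid belongs to the currently running transaction, the
 * caller must fall back to a full commit instead of a log sync.  This is
 * illustrative only and assumes a per-inode last_unlink_trans field is
 * maintained by the unlink/rename paths.
 */
static inline bool example_dir_forces_full_commit(struct inode *dir,
						  struct btrfs_trans_handle *trans)
{
	/* last_unlink_trans: transid of the most recent unlink/rename in @dir */
	return BTRFS_I(dir)->last_unlink_trans >= trans->transid;
}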
/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY	0
#define LOG_WALK_REPLAY_INODES	1
#define LOG_WALK_REPLAY_DIR_INDEX	2
#define LOG_WALK_REPLAY_ALL	3
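/*
 * Illustrative outline of how the stages above drive log replay: the same
 * log tree is walked once per stage, and the stage value in the walk
 * control decides what process_func does with each block.  This is a
 * simplified sketch, not the actual recovery path.
 */
static int example_replay_all_stages(struct btrfs_trans_handle *trans,
				     struct btrfs_root *log,
				     struct walk_control *wc)
{
	int ret = 0;

	for (wc->stage = LOG_WALK_PIN_ONLY;
	     wc->stage <= LOG_WALK_REPLAY_ALL && !ret; wc->stage++) {
		/* every pass revisits the whole log tree */
		ret = walk_log_tree(trans, log, wc);
	}
	return ret;
}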
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct inode *inode,
			   int inode_only,
			   const loff_t start,
			   const loff_t end,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree is freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree
 * and once to do all the other items.
 */
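/*
 * Illustrative outline (not the real fsync path) of how the pieces below
 * fit together for one fsync: join or start a log transaction, copy the
 * inode's items into the log tree, then sync the log.  Error handling and
 * the fallback to a full transaction commit are omitted.
 */
static int example_fsync_via_tree_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct inode *inode,
				      struct btrfs_log_ctx *ctx)
{
	int ret;

	ret = start_log_trans(trans, root, ctx);	/* become a log writer */
	if (ret)
		return ret;
	ret = btrfs_log_inode(trans, root, inode, LOG_INODE_ALL,
			      0, i_size_read(inode), ctx);
	btrfs_end_log_trans(root);			/* drop the writer count */
	if (ret)
		return ret;
	return btrfs_sync_log(trans, root, ctx);	/* write the log tree out */
}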
/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		if (btrfs_need_log_full_commit(root->fs_info, trans)) {
		if (!root->log_start_pid) {
			root->log_start_pid = current->pid;
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);

		atomic_inc(&root->log_batch);
		atomic_inc(&root->log_writers);
		index = root->log_transid % 2;
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
		mutex_unlock(&root->log_mutex);

	mutex_lock(&root->fs_info->tree_log_mutex);
	if (!root->fs_info->log_root_tree)
		ret = btrfs_init_log_root_tree(trans, root->fs_info);
	mutex_unlock(&root->fs_info->tree_log_mutex);

	if (!root->log_root) {
		ret = btrfs_add_log_tree(trans, root);

	clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
	root->log_start_pid = current->pid;
	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	index = root->log_transid % 2;
	list_add_tail(&ctx->list, &root->log_ctxs[index]);
	ctx->log_transid = root->log_transid;
	mutex_unlock(&root->log_mutex);
/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there were no transactions in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
int btrfs_pin_log_trans(struct btrfs_root *root)
{
	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		if (waitqueue_active(&root->log_writer_wait))
			wake_up(&root->log_writer_wait);
	}
}
/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen);
};
/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen)
{
	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen);
	}

	ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(log, eb);
		btrfs_write_tree_block(eb);
		btrfs_wait_tree_block_writeback(eb);
	}
/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);

		u32 dst_size = btrfs_item_size_nr(path->nodes[0],

		if (dst_size != item_size)
		if (item_size == 0) {
			btrfs_release_path(path);
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,

		ret = memcmp(dst_copy, src_copy, item_size);

		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
			btrfs_release_path(path);

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
			struct btrfs_inode_item *item;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
				btrfs_set_inode_size(eb, item, 0);
	} else if (inode_item) {
		struct btrfs_inode_item *item;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
			btrfs_set_inode_size(eb, item, 0);
	}

	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	ret = btrfs_insert_empty_item(trans, root, path,

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST) {
		found_size = btrfs_item_size_nr(path->nodes[0],
		if (found_size > item_size)
			btrfs_truncate_item(root, path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(root, path,
					  item_size - found_size);
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0)

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			saved_i_size = btrfs_inode_size(path->nodes[0],
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct btrfs_key key;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	} else if (is_bad_inode(inode)) {
/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	u64 start = key->offset;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_inline_len(eb, slot, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size, root->sectorsize);
	}

	inode = read_one_inode(root, key->objectid);

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),

	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		unsigned long dest_offset;
		struct btrfs_key ins;

		ret = btrfs_insert_empty_item(trans, root, path, key,
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		if (ins.objectid > 0) {
			LIST_HEAD(ordered_sums);

			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_extent(root, ins.objectid,
				ret = btrfs_inc_extent_ref(trans, root,
						ins.objectid, ins.offset,
						0, root->root_key.objectid,
						key->objectid, offset, 0);
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root, root->root_key.objectid,
						key->objectid, offset, &ins);
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						       csum_start, csum_end - 1,
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						  struct btrfs_ordered_sum,
				ret = btrfs_csum_file_blocks(trans,
						root->fs_info->csum_root,
				list_del(&sums->list);
			}
		}
		btrfs_release_path(path);
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
	}

	inode_add_bytes(inode, nbytes);
	ret = btrfs_update_inode(trans, root, inode);
/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct inode *dir,
				      struct btrfs_dir_item *di)
{
	struct extent_buffer *leaf;
	struct btrfs_key location;

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);

	ret = link_to_fixup_dir(trans, root, path, location.objectid);

	ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);

	ret = btrfs_run_delayed_items(trans, root);
/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
	}
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
	}
	btrfs_release_path(path);
/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   char *name, int namelen)
{
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr_end;
	unsigned long name_ptr;

	path = btrfs_alloc_path();

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);

	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		if (btrfs_find_name_in_ext_backref(path, ref_objectid,
						   name, namelen, NULL))
	}

	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
		}
		ptr = (unsigned long)(ref + 1) + found_name_len;
	}
	btrfs_free_path(path);
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct inode *dir, struct inode *inode,
				  struct extent_buffer *eb,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
			victim_name = kmalloc(victim_name_len, GFP_NOFS);

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),

			if (!backref_in_log(log_root, &search_key,

				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir,

				ret = btrfs_run_delayed_items(trans, root);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
	if (!IS_ERR_OR_NULL(extref)) {
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)

			victim_name = kmalloc(victim_name_len, GFP_NOFS);

			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,

			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,

				victim_parent = read_one_inode(root,
				if (victim_parent) {
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,

					ret = btrfs_run_delayed_items(
				}
				iput(victim_parent);

			cur_offset += victim_name_len + sizeof(*extref);
		}
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
	}
	btrfs_release_path(path);
static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,

	*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);
static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	*index = btrfs_inode_ref_index(eb, ref);
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);

	inode = read_one_inode(root, inode_objectid);

	while (ref_ptr < ref_end) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
				dir = read_one_inode(root, parent_objectid);
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
				  ref_index, name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */
				ret = __add_inode_ref(trans, root, path, log,
						      ref_index, name, namelen,

			/* insert our name */
			ret = btrfs_add_link(trans, dir, inode, name, namelen,

			btrfs_update_inode(trans, root, inode);
		}

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
	}

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);

	btrfs_release_path(path);
static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 offset)
{
	ret = btrfs_find_item(root, NULL, BTRFS_ORPHAN_OBJECTID,
			      offset, BTRFS_ORPHAN_ITEM_KEY, NULL);
		ret = btrfs_insert_orphan_item(trans, root, offset);
static int count_inode_extrefs(struct btrfs_root *root,
			       struct inode *inode, struct btrfs_path *path)
{
	unsigned int nlink = 0;
	u64 inode_objectid = btrfs_ino(inode);
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			cur_offset += name_len + sizeof(*extref);
		}

		btrfs_release_path(path);
	btrfs_release_path(path);
static int count_inode_refs(struct btrfs_root *root,
			    struct inode *inode, struct btrfs_path *path)
{
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr_end;
	u64 ino = btrfs_ino(inode);

	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
			if (path->slots[0] == 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key,
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
			ptr = (unsigned long)(ref + 1) + name_len;
		}

		if (key.offset == 0)
		if (path->slots[0] > 0) {
		}
		btrfs_release_path(path);
	btrfs_release_path(path);
/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();

	ret = count_inode_refs(root, inode, path);

	ret = count_inode_extrefs(root, inode, path);

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
		ret = insert_orphan_item(trans, root, ino);
	}

	btrfs_free_path(path);
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;

		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
			if (path->slots[0] == 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)

		ret = btrfs_del_item(trans, root, path);

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);

		ret = fixup_inode_link_count(trans, root, inode);

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	btrfs_release_path(path);
/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	struct inode *inode;

	inode = read_one_inode(root, objectid);

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		ret = btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
	} else {
		BUG(); /* Logic Error */
	}
/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    u64 dirid, u64 index,
				    char *name, int name_len, u8 type,
				    struct btrfs_key *location)
{
	struct inode *inode;

	inode = read_one_inode(root, location->objectid);

	dir = read_one_inode(root, dirid);

	ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);

	/* FIXME, put inode into FIXUP list */
/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);

	dir = read_one_inode(root, key->objectid);

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);

	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
	}
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		update_size = false;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	ret = drop_one_dir_item(trans, root, path, dir, dst_di);

	if (key->type == BTRFS_DIR_INDEX_KEY)

	btrfs_release_path(path);
	if (!ret && update_size) {
		btrfs_i_size_write(dir, dir->i_size + name_len * 2);
		ret = btrfs_update_inode(trans, root, dir);
	}

	btrfs_release_path(path);
	ret = insert_one_name(trans, root, path, key->objectid, key->offset,
			      name, name_len, log_type, &log_key);
	if (ret && ret != -ENOENT)
	update_size = false;
/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	unsigned long ptr_end;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di))
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		ptr = (unsigned long)(di + 1);
	}
/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	struct btrfs_dir_log_item *item;

	if (*start_ret == (u64)-1)

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (path->slots[0] == 0)
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		*start_ret = key.offset;
		*end_ret = found_end;
	}

	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
	}
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	btrfs_release_path(path);
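/*
 * Simplified sketch of how the authoritative ranges are consumed: walk the
 * [start, end] ranges the log claims for a directory and, for every key the
 * subvolume holds inside such a range, verify it is still present in the log
 * (check_item_in_log() below does the actual verification and unlink).  This
 * mirrors what replay_dir_deletes() does and is for illustration only.
 */
static int example_scan_logged_ranges(struct btrfs_root *log,
				      struct btrfs_path *path, u64 dirid)
{
	u64 range_start = 0;
	u64 range_end = 0;
	int ret;

	while (1) {
		ret = find_dir_range(log, path, dirid, BTRFS_DIR_LOG_ITEM_KEY,
				     &range_start, &range_end);
		if (ret)
			break;
		/* ... look up subvolume keys in [range_start, range_end] and
		 * call check_item_in_log() on each one found ... */
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}
	return ret;
}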
/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
{
	struct extent_buffer *eb;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	unsigned long ptr_end;
	struct inode *inode;
	struct btrfs_key location;

	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di)) {
		}
		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);

		read_extent_buffer(eb, name, (unsigned long)(di + 1),

		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
		}
		if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(path);
			btrfs_release_path(log_path);
			inode = read_one_inode(root, location.objectid);

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);

			ret = btrfs_unlink_inode(trans, root, dir, inode,

			ret = btrfs_run_delayed_items(trans, root);

			/* there might still be more names under this key
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
		} else if (IS_ERR(log_di)) {
			return PTR_ERR(log_di);
		}
		btrfs_release_path(log_path);

		ptr = (unsigned long)(di + 1);
	}
	btrfs_release_path(path);
	btrfs_release_path(log_path);
/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
{
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 * from the log
	 */
		btrfs_free_path(log_path);

	range_end = (u64)-1;

		ret = find_dir_range(log, path, dirid, key_type,
				     &range_start, &range_end);

		dir_key.offset = range_start;
			ret = btrfs_search_slot(NULL, root, &dir_key, path,

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)

			if (found_key.offset > range_end)

			ret = check_item_in_log(trans, root, log, path,

			if (found_key.offset == (u64)-1)
			dir_key.offset = found_key.offset + 1;

		btrfs_release_path(path);
		if (range_end == (u64)-1)
		range_start = range_end + 1;

	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);
	btrfs_free_path(log_path);
/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen)
{
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;

	ret = btrfs_read_buffer(eb, gen);

	level = btrfs_header_level(eb);

	path = btrfs_alloc_path();

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;

			inode_item = btrfs_item_ptr(eb, i,
					struct btrfs_inode_item);
			mode = btrfs_inode_mode(eb, inode_item);
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid, 0);
			}
			ret = overwrite_item(wc->trans, root, path,

			/* for regular files, make sure the corresponding
			 * orphan item exists.  extents past the new EOF
			 * will be truncated later by orphan cleanup.
			 */
			if (S_ISREG(mode)) {
				ret = insert_orphan_item(wc->trans, root,
			}

			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);
		}

		if (key.type == BTRFS_DIR_INDEX_KEY &&
		    wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
			ret = replay_one_dir_item(wc->trans, root, path,
		}

		if (wc->stage < LOG_WALK_REPLAY_ALL)

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
		} else if (key.type == BTRFS_INODE_REF_KEY ||
			   key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
			if (ret && ret != -ENOENT)
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
		} else if (key.type == BTRFS_DIR_ITEM_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
		}
	}
	btrfs_free_path(path);
static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_path *path, int *level,
				       struct walk_control *wc)
{
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	while (*level > 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		WARN_ON(btrfs_header_level(cur) != *level);

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = root->nodesize;

		parent = path->nodes[*level];
		root_owner = btrfs_header_owner(parent);

		next = btrfs_find_create_tree_block(root, bytenr, blocksize);

			ret = wc->process_func(root, next, wc, ptr_gen);
				free_extent_buffer(next);

			path->slots[*level]++;
				ret = btrfs_read_buffer(next, ptr_gen);
					free_extent_buffer(next);

				btrfs_tree_lock(next);
				btrfs_set_lock_blocking(next);
				clean_tree_block(trans, root, next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);

				WARN_ON(root_owner !=
					BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(root,
					free_extent_buffer(next);

			free_extent_buffer(next);

		ret = btrfs_read_buffer(next, ptr_gen);
			free_extent_buffer(next);

		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
	}
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int *level,
				     struct walk_control *wc)
{
	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {

			WARN_ON(*level == 0);

			struct extent_buffer *parent;
			if (path->nodes[*level] == root->node)
				parent = path->nodes[*level];
				parent = path->nodes[*level + 1];

			root_owner = btrfs_header_owner(parent);
			ret = wc->process_func(root, path->nodes[*level], wc,
				 btrfs_header_generation(path->nodes[*level]));

				struct extent_buffer *next;

				next = path->nodes[*level];

				btrfs_tree_lock(next);
				btrfs_set_lock_blocking(next);
				clean_tree_block(trans, root, next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);

				WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(root,
						path->nodes[*level]->start,
						path->nodes[*level]->len);

			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
		}
	}
/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
static int walk_log_tree(struct btrfs_trans_handle *trans,
			 struct btrfs_root *log, struct walk_control *wc)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();

	level = btrfs_header_level(log->node);

	path->nodes[level] = log->node;
	extent_buffer_get(log->node);
	path->slots[level] = 0;

		wret = walk_down_log_tree(trans, log, path, &level, wc);

		wret = walk_up_log_tree(trans, log, path, &level, wc);

	/* was the root node processed? if not, catch it here */
	if (path->nodes[orig_level]) {
		ret = wc->process_func(log, path->nodes[orig_level], wc,
			 btrfs_header_generation(path->nodes[orig_level]));

			struct extent_buffer *next;

			next = path->nodes[orig_level];

			btrfs_tree_lock(next);
			btrfs_set_lock_blocking(next);
			clean_tree_block(trans, log, next);
			btrfs_wait_tree_block_writeback(next);
			btrfs_tree_unlock(next);

			WARN_ON(log->root_key.objectid !=
				BTRFS_TREE_LOG_OBJECTID);
			ret = btrfs_free_and_pin_reserved_extent(log, next->start,
	}

	btrfs_free_path(path);
/*
 * helper function to update the item for a given subvolume's log root
 * in the tree of log roots
 */
static int update_log_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *log)
{
	if (log->log_transid == 1) {
		/* insert root item on the first sync */
		ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
				&log->root_key, &log->root_item);
	} else {
		ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
				&log->root_key, &log->root_item);
	}
static void wait_log_commit(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, int transid)
{
	int index = transid % 2;

	/*
	 * we only allow two pending log transactions at a time,
	 * so we know that if ours is more than 2 older than the
	 * current transaction, we're done
	 */
	do {
		prepare_to_wait(&root->log_commit_wait[index],
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);

		if (root->log_transid_committed < transid &&
		    atomic_read(&root->log_commit[index]))

		finish_wait(&root->log_commit_wait[index], &wait);
		mutex_lock(&root->log_mutex);
	} while (root->log_transid_committed < transid &&
		 atomic_read(&root->log_commit[index]));
static void wait_for_writer(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	while (atomic_read(&root->log_writers)) {
		prepare_to_wait(&root->log_writer_wait,
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);
		if (atomic_read(&root->log_writers))
		mutex_lock(&root->log_mutex);
		finish_wait(&root->log_writer_wait, &wait);
	}
static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
					struct btrfs_log_ctx *ctx)
{
	mutex_lock(&root->log_mutex);
	list_del_init(&ctx->list);
	mutex_unlock(&root->log_mutex);
}
/*
 * Invoked in log mutex context, or be sure there is no other task which
 * can access the list.
 */
static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
					     int index, int error)
{
	struct btrfs_log_ctx *ctx;

	if (!error) {
		INIT_LIST_HEAD(&root->log_ctxs[index]);
		return;
	}

	list_for_each_entry(ctx, &root->log_ctxs[index], list)
		ctx->log_ret = error;

	INIT_LIST_HEAD(&root->log_ctxs[index]);
}
/*
 * btrfs_sync_log sends a given tree log down to the disk and
 * updates the super blocks to record it.  When this call is done,
 * you know that any inodes previously logged are safely on disk only
 * if it returns 0.
 *
 * Any other return value means you need to call btrfs_commit_transaction.
 * Some of the edge cases for fsyncing directories that have had unlinks
 * or renames done in the past mean that sometimes the only safe
 * fsync is to commit the whole FS.  When btrfs_sync_log returns -EAGAIN,
 * that has happened.
 */
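/*
 * Illustrative caller pattern (not lifted verbatim from the fsync path):
 *
 *	ret = btrfs_sync_log(trans, root, ctx);
 *	if (ret == 0)
 *		ret = btrfs_end_transaction(trans, root);
 *	else
 *		ret = btrfs_commit_transaction(trans, root);
 *
 * i.e. a zero return lets the caller finish with a cheap log sync, while
 * any other value (including -EAGAIN) falls back to a full commit.
 */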
int btrfs_sync_log(struct btrfs_trans_handle *trans,
		   struct btrfs_root *root, struct btrfs_log_ctx *ctx)
{
	int index1;
	int index2;
	int mark;
	int ret;
	struct btrfs_root *log = root->log_root;
	struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
	int log_transid = 0;
	struct btrfs_log_ctx root_log_ctx;
	struct blk_plug plug;

	mutex_lock(&root->log_mutex);
	log_transid = ctx->log_transid;
	if (root->log_transid_committed >= log_transid) {
		mutex_unlock(&root->log_mutex);
		return ctx->log_ret;
	}

	index1 = log_transid % 2;
	if (atomic_read(&root->log_commit[index1])) {
		wait_log_commit(trans, root, log_transid);
		mutex_unlock(&root->log_mutex);
		return ctx->log_ret;
	}
	ASSERT(log_transid == root->log_transid);
	atomic_set(&root->log_commit[index1], 1);

	/* wait for previous tree log sync to complete */
	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
		wait_log_commit(trans, root, log_transid - 1);

	while (1) {
		int batch = atomic_read(&root->log_batch);
		/* when we're on an ssd, just kick the log commit out */
		if (!btrfs_test_opt(root, SSD) &&
		    test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
			mutex_unlock(&root->log_mutex);
			schedule_timeout_uninterruptible(1);
			mutex_lock(&root->log_mutex);
		}
		wait_for_writer(trans, root);
		if (batch == atomic_read(&root->log_batch))
			break;
	}

	/* bail out if we need to do a full commit */
	if (btrfs_need_log_full_commit(root->fs_info, trans)) {
		ret = -EAGAIN;
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	if (log_transid % 2 == 0)
		mark = EXTENT_DIRTY;
	else
		mark = EXTENT_NEW;

	/* we start IO on all the marked extents here, but we don't actually
	 * wait for them until later.
	 */
	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
	if (ret) {
		blk_finish_plug(&plug);
		btrfs_abort_transaction(trans, root, ret);
		btrfs_free_logged_extents(log, log_transid);
		btrfs_set_log_full_commit(root->fs_info, trans);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	btrfs_set_root_node(&log->root_item, log->node);

	root->log_transid++;
	log->log_transid = root->log_transid;
	root->log_start_pid = 0;
	/*
	 * IO has been started, blocks of the log tree have WRITTEN flag set
	 * in their headers. new modifications of the log will be written to
	 * new positions. so it's safe to allow log writers to go in.
	 */
	mutex_unlock(&root->log_mutex);

	btrfs_init_log_ctx(&root_log_ctx);

	mutex_lock(&log_root_tree->log_mutex);
	atomic_inc(&log_root_tree->log_batch);
	atomic_inc(&log_root_tree->log_writers);

	index2 = log_root_tree->log_transid % 2;
	list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
	root_log_ctx.log_transid = log_root_tree->log_transid;

	mutex_unlock(&log_root_tree->log_mutex);

	ret = update_log_root(trans, log);

	mutex_lock(&log_root_tree->log_mutex);
	if (atomic_dec_and_test(&log_root_tree->log_writers)) {
		smp_mb();
		if (waitqueue_active(&log_root_tree->log_writer_wait))
			wake_up(&log_root_tree->log_writer_wait);
	}

	if (ret) {
		if (!list_empty(&root_log_ctx.list))
			list_del_init(&root_log_ctx.list);

		blk_finish_plug(&plug);
		btrfs_set_log_full_commit(root->fs_info, trans);

		if (ret != -ENOSPC) {
			btrfs_abort_transaction(trans, root, ret);
			mutex_unlock(&log_root_tree->log_mutex);
			goto out;
		}
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out;
	}

	if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
		mutex_unlock(&log_root_tree->log_mutex);
		ret = root_log_ctx.log_ret;
		goto out;
	}

	index2 = root_log_ctx.log_transid % 2;
	if (atomic_read(&log_root_tree->log_commit[index2])) {
		blk_finish_plug(&plug);
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		wait_log_commit(trans, log_root_tree,
				root_log_ctx.log_transid);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = root_log_ctx.log_ret;
		goto out;
	}
	ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
	atomic_set(&log_root_tree->log_commit[index2], 1);

	if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
		wait_log_commit(trans, log_root_tree,
				root_log_ctx.log_transid - 1);
	}

	wait_for_writer(trans, log_root_tree);

	/*
	 * now that we've moved on to the tree of log tree roots,
	 * check the full commit flag again
	 */
	if (btrfs_need_log_full_commit(root->fs_info, trans)) {
		blk_finish_plug(&plug);
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out_wake_log_root;
	}

	ret = btrfs_write_marked_extents(log_root_tree,
					 &log_root_tree->dirty_log_pages,
					 EXTENT_DIRTY | EXTENT_NEW);
	blk_finish_plug(&plug);
	if (ret) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		btrfs_abort_transaction(trans, root, ret);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}
	btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
	btrfs_wait_marked_extents(log_root_tree,
				  &log_root_tree->dirty_log_pages,
				  EXTENT_NEW | EXTENT_DIRTY);
	btrfs_wait_logged_extents(log, log_transid);

	btrfs_set_super_log_root(root->fs_info->super_for_commit,
				 log_root_tree->node->start);
	btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
				       btrfs_header_level(log_root_tree->node));

	log_root_tree->log_transid++;
	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * nobody else is going to jump in and write the ctree
	 * super here because the log_commit atomic below is protecting
	 * us.  We must be called with a transaction handle pinning
	 * the running transaction open, so a full commit can't hop
	 * in and cause problems either.
	 */
	ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
	if (ret) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		btrfs_abort_transaction(trans, root, ret);
		goto out_wake_log_root;
	}

	mutex_lock(&root->log_mutex);
	if (root->last_log_commit < log_transid)
		root->last_log_commit = log_transid;
	mutex_unlock(&root->log_mutex);

out_wake_log_root:
	/*
	 * We don't need to take log_mutex here because we know all
	 * the other tasks are blocked.
	 */
	btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);

	mutex_lock(&log_root_tree->log_mutex);
	log_root_tree->log_transid_committed++;
	atomic_set(&log_root_tree->log_commit[index2], 0);
	mutex_unlock(&log_root_tree->log_mutex);

	if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
		wake_up(&log_root_tree->log_commit_wait[index2]);
out:
	btrfs_remove_all_log_ctxs(root, index1, ret);

	mutex_lock(&root->log_mutex);
	root->log_transid_committed++;
	atomic_set(&root->log_commit[index1], 0);
	mutex_unlock(&root->log_mutex);

	if (waitqueue_active(&root->log_commit_wait[index1]))
		wake_up(&root->log_commit_wait[index1]);
	return ret;
}
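/*
 * Illustrative note (not part of the original source): log_transid grows
 * monotonically and each root keeps two commit slots, log_commit[0] and
 * log_commit[1], indexed by log_transid % 2.  While the slot for transid N
 * is being written out, writers can already fill the log for transid N + 1
 * under the other slot; wait_log_commit() above sleeps on whichever slot
 * matches the transid it cares about.
 */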
static void free_log_tree(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log)
{
	int ret;
	u64 start;
	u64 end;
	struct walk_control wc = {
		.free = 1,
		.process_func = process_one_buffer
	};

	ret = walk_log_tree(trans, log, &wc);
	/* I don't think this can happen but just in case */
	if (ret)
		btrfs_abort_transaction(trans, log, ret);

	while (1) {
		ret = find_first_extent_bit(&log->dirty_log_pages,
				0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
				NULL);
		if (ret)
			break;

		clear_extent_bits(&log->dirty_log_pages, start, end,
				  EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
	}

	/*
	 * We may have short-circuited the log tree with the full commit logic
	 * and left ordered extents on our list, so clear these out to keep us
	 * from leaking inodes and memory.
	 */
	btrfs_free_logged_extents(log, 0);
	btrfs_free_logged_extents(log, 1);

	free_extent_buffer(log->node);
	kfree(log);
}
/*
 * free all the extents used by the tree log.  This should be called
 * at commit time of the full transaction
 */
int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
{
	if (root->log_root) {
		free_log_tree(trans, root->log_root);
		root->log_root = NULL;
	}
	return 0;
}

int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	if (fs_info->log_root_tree) {
		free_log_tree(trans, fs_info->log_root_tree);
		fs_info->log_root_tree = NULL;
	}
	return 0;
}
/*
 * If both a file and directory are logged, and unlinks or renames are
 * mixed in, we have a few interesting corners:
 *
 * create file X in dir Y
 * link file X to X.link in dir Y
 * fsync file X
 * unlink file X but leave X.link
 * fsync dir Y
 *
 * After a crash we would expect only X.link to exist.  But file X
 * didn't get fsync'd again so the log has back refs for X and X.link.
 *
 * We solve this by removing directory entries and inode backrefs from the
 * log when a file that was logged in the current transaction is
 * unlinked.  Any later fsync will include the updated log entries, and
 * we'll be able to reconstruct the proper directory items from backrefs.
 *
 * This optimization lets us avoid relogging the entire inode
 * or the entire directory.
 */
int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 const char *name, int name_len,
				 struct inode *dir, u64 index)
{
	struct btrfs_root *log;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	int ret;
	int err = 0;
	int bytes_del = 0;
	u64 dir_ino = btrfs_ino(dir);

	if (BTRFS_I(dir)->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;

	mutex_lock(&BTRFS_I(dir)->log_mutex);

	log = root->log_root;
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_unlock;
	}

	di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		if (ret) {
			err = ret;
			goto fail;
		}
	}
	btrfs_release_path(path);
	di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
					 index, name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		if (ret) {
			err = ret;
			goto fail;
		}
	}

	/* update the directory size in the log to reflect the names
	 * we have removed
	 */
	if (bytes_del) {
		struct btrfs_key key;

		key.objectid = dir_ino;
		key.offset = 0;
		key.type = BTRFS_INODE_ITEM_KEY;
		btrfs_release_path(path);

		ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (ret == 0) {
			struct btrfs_inode_item *item;
			u64 i_size;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			i_size = btrfs_inode_size(path->nodes[0], item);
			if (i_size > bytes_del)
				i_size -= bytes_del;
			else
				i_size = 0;
			btrfs_set_inode_size(path->nodes[0], item, i_size);
			btrfs_mark_buffer_dirty(path->nodes[0]);
		} else
			ret = 0;
		btrfs_release_path(path);
	}
fail:
	btrfs_free_path(path);
out_unlock:
	mutex_unlock(&BTRFS_I(dir)->log_mutex);
	if (ret == -ENOSPC) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		ret = 0;
	} else if (ret < 0)
		btrfs_abort_transaction(trans, root, ret);

	btrfs_end_log_trans(root);

	return err;
}
/* see comments for btrfs_del_dir_entries_in_log */
int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       const char *name, int name_len,
			       struct inode *inode, u64 dirid)
{
	struct btrfs_root *log;
	u64 index;
	int ret;

	if (BTRFS_I(inode)->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;
	log = root->log_root;
	mutex_lock(&BTRFS_I(inode)->log_mutex);

	ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
				  dirid, &index);
	mutex_unlock(&BTRFS_I(inode)->log_mutex);
	if (ret == -ENOSPC) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		ret = 0;
	} else if (ret < 0 && ret != -ENOENT)
		btrfs_abort_transaction(trans, root, ret);
	btrfs_end_log_trans(root);

	return ret;
}
/*
 * creates a range item in the log for 'dirid'.  first_offset and
 * last_offset tell us which parts of the key space the log should
 * be considered authoritative for.
 */
static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       int key_type, u64 dirid,
				       u64 first_offset, u64 last_offset)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_dir_log_item *item;

	key.objectid = dirid;
	key.offset = first_offset;
	if (key_type == BTRFS_DIR_ITEM_KEY)
		key.type = BTRFS_DIR_LOG_ITEM_KEY;
	else
		key.type = BTRFS_DIR_LOG_INDEX_KEY;
	ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
	if (ret)
		return ret;

	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}
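/*
 * Illustration (not from the original source): a dir log item is keyed as
 * (dirid, BTRFS_DIR_LOG_INDEX_KEY or BTRFS_DIR_LOG_ITEM_KEY, first_offset)
 * with dir_log_end set to last_offset.  During replay, any directory key in
 * [first_offset, last_offset] that exists in the subvolume but has no
 * matching entry in the log is treated as deleted in this transaction and
 * is removed.
 */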
/*
 * log all the items included in the current transaction for a given
 * directory.  This also creates the range items in the log tree required
 * to replay anything deleted before the fsync
 */
static noinline int log_dir_items(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path, int key_type,
			  u64 min_offset, u64 *last_offset_ret)
{
	struct btrfs_key min_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src;
	int err = 0;
	int ret;
	int i;
	int nritems;
	u64 first_offset = min_offset;
	u64 last_offset = (u64)-1;
	u64 ino = btrfs_ino(inode);

	log = root->log_root;

	min_key.objectid = ino;
	min_key.type = key_type;
	min_key.offset = min_offset;

	ret = btrfs_search_forward(root, &min_key, path, trans->transid);

	/*
	 * we didn't find anything from this transaction, see if there
	 * is anything at all
	 */
	if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
		min_key.objectid = ino;
		min_key.type = key_type;
		min_key.offset = (u64)-1;
		btrfs_release_path(path);
		ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
		if (ret < 0) {
			btrfs_release_path(path);
			return ret;
		}
		ret = btrfs_previous_item(root, path, ino, key_type);

		/* if ret == 0 there are items for this type,
		 * create a range to tell us the last key of this type.
		 * otherwise, there are no items in this directory after
		 * *min_offset, and we create a range to indicate that.
		 */
		if (ret == 0) {
			struct btrfs_key tmp;
			btrfs_item_key_to_cpu(path->nodes[0], &tmp,
					      path->slots[0]);
			if (key_type == tmp.type)
				first_offset = max(min_offset, tmp.offset) + 1;
		}
		goto done;
	}

	/* go backward to find any previous key */
	ret = btrfs_previous_item(root, path, ino, key_type);
	if (ret == 0) {
		struct btrfs_key tmp;
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (key_type == tmp.type) {
			first_offset = tmp.offset;
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret) {
				err = ret;
				goto done;
			}
		}
	}
	btrfs_release_path(path);

	/* find the first key from this transaction again */
	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
	if (WARN_ON(ret != 0))
		goto done;

	/*
	 * we have a block from this transaction, log every item in it
	 * from our directory
	 */
	while (1) {
		struct btrfs_key tmp;
		src = path->nodes[0];
		nritems = btrfs_header_nritems(src);
		for (i = path->slots[0]; i < nritems; i++) {
			btrfs_item_key_to_cpu(src, &min_key, i);

			if (min_key.objectid != ino || min_key.type != key_type)
				goto done;
			ret = overwrite_item(trans, log, dst_path, src, i,
					     &min_key);
			if (ret) {
				err = ret;
				goto done;
			}
		}
		path->slots[0] = nritems;

		/*
		 * look ahead to the next item and see if it is also
		 * from this directory and from this transaction
		 */
		ret = btrfs_next_leaf(root, path);
		if (ret == 1) {
			last_offset = (u64)-1;
			goto done;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (tmp.objectid != ino || tmp.type != key_type) {
			last_offset = (u64)-1;
			goto done;
		}
		if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret)
				err = ret;
			else
				last_offset = tmp.offset;
			goto done;
		}
	}
done:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);

	if (err == 0) {
		*last_offset_ret = last_offset;
		/*
		 * insert the log range keys to indicate where the log
		 * is valid
		 */
		ret = insert_dir_log_key(trans, log, path, key_type,
					 ino, first_offset, last_offset);
		if (ret)
			err = ret;
	}
	return err;
}
/*
 * logging directories is very similar to logging inodes, we find all the
 * items from the current transaction and write them to the log.
 *
 * The recovery code scans the directory in the subvolume, and if it finds a
 * key in the range logged that is not present in the log tree, then it means
 * that dir entry was unlinked during the transaction.
 *
 * In order for that scan to work, we must include one key smaller than
 * the smallest logged by this transaction and one key larger than the largest
 * key logged by this transaction.
 */
static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path)
{
	u64 min_key;
	u64 max_key;
	int ret;
	int key_type = BTRFS_DIR_ITEM_KEY;

again:
	min_key = 0;
	max_key = 0;
	while (1) {
		ret = log_dir_items(trans, root, inode, path,
				    dst_path, key_type, min_key,
				    &max_key);
		if (ret)
			return ret;
		if (max_key == (u64)-1)
			break;
		min_key = max_key + 1;
	}

	if (key_type == BTRFS_DIR_ITEM_KEY) {
		key_type = BTRFS_DIR_INDEX_KEY;
		goto again;
	}
	return 0;
}
/*
 * a helper function to drop items from the log before we relog an
 * inode.  max_key_type indicates the highest item type to remove.
 * This cannot be run for file data extents because it does not
 * free the extents they point to.
 */
static int drop_objectid_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log,
			       struct btrfs_path *path,
			       u64 objectid, int max_key_type)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int start_slot;

	key.objectid = objectid;
	key.type = max_key_type;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
		BUG_ON(ret == 0); /* Logic error */
		if (ret < 0)
			break;

		if (path->slots[0] == 0)
			break;

		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);

		if (found_key.objectid != objectid)
			break;

		found_key.type = 0;
		found_key.offset = 0;
		ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
				       &start_slot);

		ret = btrfs_del_items(trans, log, path, start_slot,
				      path->slots[0] - start_slot + 1);
		/*
		 * If start slot isn't 0 then we don't need to re-search, we've
		 * found the last guy with the objectid in this tree.
		 */
		if (ret || start_slot != 0)
			break;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);
	if (ret > 0)
		ret = 0;
	return ret;
}
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode, int log_inode_only)
{
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	if (log_inode_only) {
		/* set the generation to zero so the recover code
		 * can tell the difference between a logging
		 * just to say 'this inode exists' and a logging
		 * to say 'update this inode with these values'
		 */
		btrfs_set_token_inode_generation(leaf, item, 0, &token);
		btrfs_set_token_inode_size(leaf, item, 0, &token);
	} else {
		btrfs_set_token_inode_generation(leaf, item,
						 BTRFS_I(inode)->generation,
						 &token);
		btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
	}

	btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
	btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
	btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
	btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);

	btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
				     inode->i_atime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
				      inode->i_atime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
				     inode->i_mtime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
				      inode->i_mtime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
				     inode->i_ctime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
				      inode->i_ctime.tv_nsec, &token);

	btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
				     &token);

	btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
	btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
	btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
	btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
	btrfs_set_token_inode_block_group(leaf, item, 0, &token);
}
static int log_inode_item(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log, struct btrfs_path *path,
			  struct inode *inode)
{
	struct btrfs_inode_item *inode_item;
	int ret;

	ret = btrfs_insert_empty_item(trans, log, path,
				      &BTRFS_I(inode)->location,
				      sizeof(*inode_item));
	if (ret && ret != -EEXIST)
		return ret;
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	fill_inode_item(trans, path->nodes[0], inode_item, inode, 0);
	btrfs_release_path(path);
	return 0;
}
static noinline int copy_items(struct btrfs_trans_handle *trans,
			       struct inode *inode,
			       struct btrfs_path *dst_path,
			       struct btrfs_path *src_path, u64 *last_extent,
			       int start_slot, int nr, int inode_only)
{
	unsigned long src_offset;
	unsigned long dst_offset;
	struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
	struct btrfs_file_extent_item *extent;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *src = src_path->nodes[0];
	struct btrfs_key first_key, last_key, key;
	int ret;
	struct btrfs_key *ins_keys;
	u32 *ins_sizes;
	char *ins_data;
	int i;
	struct list_head ordered_sums;
	int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
	bool has_extents = false;
	bool need_find_last_extent = true;
	bool done = false;

	INIT_LIST_HEAD(&ordered_sums);

	ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
			   nr * sizeof(u32), GFP_NOFS);
	if (!ins_data)
		return -ENOMEM;

	first_key.objectid = (u64)-1;

	ins_sizes = (u32 *)ins_data;
	ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));

	for (i = 0; i < nr; i++) {
		ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
		btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
	}
	ret = btrfs_insert_empty_items(trans, log, dst_path,
				       ins_keys, ins_sizes, nr);
	if (ret) {
		kfree(ins_data);
		return ret;
	}

	for (i = 0; i < nr; i++, dst_path->slots[0]++) {
		dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
						   dst_path->slots[0]);

		src_offset = btrfs_item_ptr_offset(src, start_slot + i);

		if ((i == (nr - 1)))
			last_key = ins_keys[i];

		if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
			inode_item = btrfs_item_ptr(dst_path->nodes[0],
						    dst_path->slots[0],
						    struct btrfs_inode_item);
			fill_inode_item(trans, dst_path->nodes[0], inode_item,
					inode, inode_only == LOG_INODE_EXISTS);
		} else {
			copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
					   src_offset, ins_sizes[i]);
		}

		/*
		 * We set need_find_last_extent here in case we know we were
		 * processing other items and then walk into the first extent in
		 * the inode.  If we don't hit an extent then nothing changes,
		 * we'll do the last search the next time around.
		 */
		if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
			has_extents = true;
			if (first_key.objectid == (u64)-1)
				first_key = ins_keys[i];
		} else {
			need_find_last_extent = false;
		}

		/* take a reference on file data extents so that truncates
		 * or deletes of this inode don't have to relog the inode
		 * again
		 */
		if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
		    !skip_csum) {
			int found_type;
			extent = btrfs_item_ptr(src, start_slot + i,
						struct btrfs_file_extent_item);

			if (btrfs_file_extent_generation(src, extent) < trans->transid)
				continue;

			found_type = btrfs_file_extent_type(src, extent);
			if (found_type == BTRFS_FILE_EXTENT_REG) {
				u64 ds, dl, cs, cl;
				ds = btrfs_file_extent_disk_bytenr(src,
								extent);
				/* ds == 0 is a hole */
				if (ds == 0)
					continue;

				dl = btrfs_file_extent_disk_num_bytes(src,
								extent);
				cs = btrfs_file_extent_offset(src, extent);
				cl = btrfs_file_extent_num_bytes(src,
								extent);
				if (btrfs_file_extent_compression(src,
								  extent)) {
					cs = 0;
					cl = dl;
				}

				ret = btrfs_lookup_csums_range(
						log->fs_info->csum_root,
						ds + cs, ds + cs + cl - 1,
						&ordered_sums, 0);
				if (ret) {
					btrfs_release_path(dst_path);
					kfree(ins_data);
					return ret;
				}
			}
		}
	}

	btrfs_mark_buffer_dirty(dst_path->nodes[0]);
	btrfs_release_path(dst_path);
	kfree(ins_data);

	/*
	 * we have to do this after the loop above to avoid changing the
	 * log tree while trying to change the log tree.
	 */
	ret = 0;
	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						   struct btrfs_ordered_sum,
						   list);
		if (!ret)
			ret = btrfs_csum_file_blocks(trans, log, sums);
		list_del(&sums->list);
		kfree(sums);
	}

	if (!has_extents)
		return ret;

	if (need_find_last_extent && *last_extent == first_key.offset) {
		/*
		 * We don't have any leafs between our current one and the one
		 * we processed before that can have file extent items for our
		 * inode (and have a generation number smaller than our current
		 * transaction id).
		 */
		need_find_last_extent = false;
	}

	/*
	 * Because we use btrfs_search_forward we could skip leaves that were
	 * not modified and then assume *last_extent is valid when it really
	 * isn't.  So back up to the previous leaf and read the end of the last
	 * extent before we go and fill in holes.
	 */
	if (need_find_last_extent) {
		u64 len;

		ret = btrfs_prev_leaf(BTRFS_I(inode)->root, src_path);
		if (ret < 0)
			return ret;
		if (ret)
			goto fill_holes;
		if (src_path->slots[0])
			src_path->slots[0]--;
		src = src_path->nodes[0];
		btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
		if (key.objectid != btrfs_ino(inode) ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			goto fill_holes;
		extent = btrfs_item_ptr(src, src_path->slots[0],
					struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(src, extent) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			len = btrfs_file_extent_inline_len(src,
							   src_path->slots[0],
							   extent);
			*last_extent = ALIGN(key.offset + len,
					     log->sectorsize);
		} else {
			len = btrfs_file_extent_num_bytes(src, extent);
			*last_extent = key.offset + len;
		}
	}
fill_holes:
	/* So we did prev_leaf, now we need to move to the next leaf, but a few
	 * things could have happened
	 *
	 * 1) A merge could have happened, so we could currently be on a leaf
	 * that holds what we were copying in the first place.
	 * 2) A split could have happened, and now not all of the items we want
	 * are on the same leaf.
	 *
	 * So we need to adjust how we search for holes, we need to drop the
	 * path and re-search for the first extent key we found, and then walk
	 * forward until we hit the last one we copied.
	 */
	if (need_find_last_extent) {
		/* btrfs_prev_leaf could return 1 without releasing the path */
		btrfs_release_path(src_path);
		ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &first_key,
					src_path, 0, 0);
		if (ret < 0)
			return ret;
		ASSERT(ret == 0);
		src = src_path->nodes[0];
		i = src_path->slots[0];
	} else {
		i = start_slot;
	}

	/*
	 * Ok so here we need to go through and fill in any holes we may have
	 * to make sure that holes are punched for those areas in case they had
	 * extents previously.
	 */
	while (!done) {
		u64 offset, len;
		u64 extent_end;

		if (i >= btrfs_header_nritems(src_path->nodes[0])) {
			ret = btrfs_next_leaf(BTRFS_I(inode)->root, src_path);
			if (ret < 0)
				return ret;
			ASSERT(ret == 0);
			src = src_path->nodes[0];
			i = 0;
		}

		btrfs_item_key_to_cpu(src, &key, i);
		if (!btrfs_comp_cpu_keys(&key, &last_key))
			done = true;
		if (key.objectid != btrfs_ino(inode) ||
		    key.type != BTRFS_EXTENT_DATA_KEY) {
			i++;
			continue;
		}
		extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(src, extent) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			len = btrfs_file_extent_inline_len(src, i, extent);
			extent_end = ALIGN(key.offset + len, log->sectorsize);
		} else {
			len = btrfs_file_extent_num_bytes(src, extent);
			extent_end = key.offset + len;
		}
		i++;

		if (*last_extent == key.offset) {
			*last_extent = extent_end;
			continue;
		}
		offset = *last_extent;
		len = key.offset - *last_extent;
		ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
					       offset, 0, 0, len, 0, len, 0,
					       0, 0);
		if (ret)
			break;
		*last_extent = extent_end;
	}
	/*
	 * Need to let the callers know we dropped the path so they should
	 * re-search.
	 */
	if (!ret && need_find_last_extent)
		ret = 1;
	return ret;
}
static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct extent_map *em1, *em2;

	em1 = list_entry(a, struct extent_map, list);
	em2 = list_entry(b, struct extent_map, list);

	if (em1->start < em2->start)
		return -1;
	else if (em1->start > em2->start)
		return 1;
	return 0;
}
static int wait_ordered_extents(struct btrfs_trans_handle *trans,
				struct inode *inode,
				struct btrfs_root *root,
				const struct extent_map *em,
				const struct list_head *logged_list,
				bool *ordered_io_error)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *log = root->log_root;
	u64 mod_start = em->mod_start;
	u64 mod_len = em->mod_len;
	const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
	u64 csum_offset;
	u64 csum_len;
	LIST_HEAD(ordered_sums);
	int ret = 0;

	*ordered_io_error = false;

	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    em->block_start == EXTENT_MAP_HOLE)
		return 0;

	/*
	 * Wait for any ordered extent that covers our extent map.  If it
	 * finishes without an error, first check and see if our csums are on
	 * our outstanding ordered extents.
	 */
	list_for_each_entry(ordered, logged_list, log_list) {
		struct btrfs_ordered_sum *sum;

		if (!mod_len)
			break;

		if (ordered->file_offset + ordered->len <= mod_start ||
		    mod_start + mod_len <= ordered->file_offset)
			continue;

		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
			const u64 start = ordered->file_offset;
			const u64 end = ordered->file_offset + ordered->len - 1;

			WARN_ON(ordered->inode != inode);
			filemap_fdatawrite_range(inode->i_mapping, start, end);
		}

		wait_event(ordered->wait,
			   (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) ||
			    test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));

		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
			*ordered_io_error = true;
			break;
		}
		/*
		 * We are going to copy all the csums on this ordered extent, so
		 * go ahead and adjust mod_start and mod_len in case this
		 * ordered extent has already been logged.
		 */
		if (ordered->file_offset > mod_start) {
			if (ordered->file_offset + ordered->len >=
			    mod_start + mod_len)
				mod_len = ordered->file_offset - mod_start;
			/*
			 * If we have this case
			 *
			 * |--------- logged extent ---------|
			 *       |----- ordered extent ----|
			 *
			 * Just don't mess with mod_start and mod_len, we'll
			 * just end up logging more csums than we need and it
			 * will be ok.
			 */
		} else {
			if (ordered->file_offset + ordered->len <
			    mod_start + mod_len) {
				mod_len = (mod_start + mod_len) -
					  (ordered->file_offset + ordered->len);
				mod_start = ordered->file_offset +
					    ordered->len;
			} else {
				mod_len = 0;
			}
		}

		if (skip_csum)
			continue;

		/*
		 * To keep us from looping for the above case of an ordered
		 * extent that falls inside of the logged extent.
		 */
		if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
				     &ordered->flags))
			continue;

		if (ordered->csum_bytes_left) {
			btrfs_start_ordered_extent(inode, ordered, 0);
			wait_event(ordered->wait,
				   ordered->csum_bytes_left == 0);
		}

		list_for_each_entry(sum, &ordered->list, list) {
			ret = btrfs_csum_file_blocks(trans, log, sum);
			if (ret)
				break;
		}
	}

	if (*ordered_io_error || !mod_len || ret || skip_csum)
		return ret;

	if (em->compress_type) {
		csum_offset = 0;
		csum_len = max(em->block_len, em->orig_block_len);
	} else {
		csum_offset = mod_start - em->start;
		csum_len = mod_len;
	}

	/* block start is already adjusted for the file extent offset. */
	ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
				       em->block_start + csum_offset,
				       em->block_start + csum_offset +
				       csum_len - 1, &ordered_sums, 0);
	if (ret)
		return ret;

	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
		if (!ret)
			ret = btrfs_csum_file_blocks(trans, log, sums);
		list_del(&sums->list);
		kfree(sums);
	}

	return ret;
}
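/*
 * Illustrative note (not in the original source): for an uncompressed
 * extent map starting at file offset em->start, only the modified part
 * [mod_start, mod_start + mod_len) needs checksums, so the lookup above is
 * offset by (mod_start - em->start) into the extent's on-disk range.
 * Compressed extents are checksummed as a whole, hence csum_offset = 0.
 */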
static int log_one_extent(struct btrfs_trans_handle *trans,
			  struct inode *inode, struct btrfs_root *root,
			  const struct extent_map *em,
			  struct btrfs_path *path,
			  const struct list_head *logged_list,
			  struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *log = root->log_root;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	struct btrfs_map_token token;
	struct btrfs_key key;
	u64 extent_offset = em->start - em->orig_start;
	u64 block_len;
	int ret;
	int extent_inserted = 0;
	bool ordered_io_err = false;

	ret = wait_ordered_extents(trans, inode, root, em, logged_list,
				   &ordered_io_err);
	if (ret)
		return ret;

	if (ordered_io_err) {
		ctx->io_err = -EIO;
		return 0;
	}

	btrfs_init_map_token(&token);

	ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
				   em->start + em->len, NULL, 0, 1,
				   sizeof(*fi), &extent_inserted);
	if (ret)
		return ret;

	if (!extent_inserted) {
		key.objectid = btrfs_ino(inode);
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = em->start;

		ret = btrfs_insert_empty_item(trans, log, path, &key,
					      sizeof(*fi));
		if (ret)
			return ret;
	}
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
					       &token);
	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		btrfs_set_token_file_extent_type(leaf, fi,
						 BTRFS_FILE_EXTENT_PREALLOC,
						 &token);
	else
		btrfs_set_token_file_extent_type(leaf, fi,
						 BTRFS_FILE_EXTENT_REG,
						 &token);

	block_len = max(em->block_len, em->orig_block_len);
	if (em->compress_type != BTRFS_COMPRESS_NONE) {
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
							em->block_start,
							&token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
							   &token);
	} else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
							em->block_start -
							extent_offset, &token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
							   &token);
	} else {
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
							   &token);
	}

	btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
	btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
	btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
	btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
						&token);
	btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
	btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	return ret;
}
static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct inode *inode,
				     struct btrfs_path *path,
				     struct list_head *logged_list,
				     struct btrfs_log_ctx *ctx)
{
	struct extent_map *em, *n;
	struct list_head extents;
	struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
	u64 test_gen;
	int ret = 0;
	int num = 0;

	INIT_LIST_HEAD(&extents);

	write_lock(&tree->lock);
	test_gen = root->fs_info->last_trans_committed;

	list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
		list_del_init(&em->list);

		/*
		 * Just an arbitrary number, this can be really CPU intensive
		 * once we start getting a lot of extents, and really once we
		 * have a bunch of extents we just want to commit since it will
		 * be faster.
		 */
		if (++num > 32768) {
			list_del_init(&tree->modified_extents);
			ret = -EFBIG;
			goto process;
		}

		if (em->generation <= test_gen)
			continue;
		/* Need a ref to keep it from getting evicted from cache */
		atomic_inc(&em->refs);
		set_bit(EXTENT_FLAG_LOGGING, &em->flags);
		list_add_tail(&em->list, &extents);
	}

	list_sort(NULL, &extents, extent_cmp);

process:
	while (!list_empty(&extents)) {
		em = list_entry(extents.next, struct extent_map, list);

		list_del_init(&em->list);

		/*
		 * If we had an error we just need to delete everybody from our
		 * private list.
		 */
		if (ret) {
			clear_em_logging(tree, em);
			free_extent_map(em);
			continue;
		}

		write_unlock(&tree->lock);

		ret = log_one_extent(trans, inode, root, em, path, logged_list,
				     ctx);
		write_lock(&tree->lock);
		clear_em_logging(tree, em);
		free_extent_map(em);
	}
	WARN_ON(!list_empty(&extents));
	write_unlock(&tree->lock);

	btrfs_release_path(path);
	return ret;
}
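/*
 * Illustrative note (not in the original source): once more than 32768
 * modified extents are queued for one inode, the function above stops
 * collecting, returns an error, and the caller falls back to a full
 * transaction commit, which is cheaper than logging that many individual
 * file extent items one at a time.
 */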
/* log a single inode in the tree log.
 * At least one parent directory for this inode must exist in the tree
 * or be logged already.
 *
 * Any items from this inode changed by the current transaction are copied
 * to the log tree.  An extra reference is taken on any extents in this
 * file, allowing us to avoid a whole pile of corner cases around logging
 * blocks that have been removed from the tree.
 *
 * See LOG_INODE_ALL and related defines for a description of what inode_only
 * does.
 *
 * This handles both files and directories.
 */
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct inode *inode,
			   int inode_only,
			   const loff_t start,
			   const loff_t end,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_path *path;
	struct btrfs_path *dst_path;
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src = NULL;
	LIST_HEAD(logged_list);
	u64 last_extent = 0;
	int err = 0;
	int ret;
	int nritems;
	int ins_start_slot = 0;
	int ins_nr;
	bool fast_search = false;
	u64 ino = btrfs_ino(inode);
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	dst_path = btrfs_alloc_path();
	if (!dst_path) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	min_key.objectid = ino;
	min_key.type = BTRFS_INODE_ITEM_KEY;
	min_key.offset = 0;

	max_key.objectid = ino;

	/* today the code can only do partial logging of directories */
	if (S_ISDIR(inode->i_mode) ||
	    (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
		       &BTRFS_I(inode)->runtime_flags) &&
	     inode_only == LOG_INODE_EXISTS))
		max_key.type = BTRFS_XATTR_ITEM_KEY;
	else
		max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	/* Only run delayed items if we are a dir or a new file */
	if (S_ISDIR(inode->i_mode) ||
	    BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) {
		ret = btrfs_commit_inode_delayed_items(trans, inode);
		if (ret) {
			btrfs_free_path(path);
			btrfs_free_path(dst_path);
			return ret;
		}
	}

	mutex_lock(&BTRFS_I(inode)->log_mutex);

	btrfs_get_logged_extents(inode, &logged_list);

	/*
	 * a brute force approach to making sure we get the most uptodate
	 * copies of everything.
	 */
	if (S_ISDIR(inode->i_mode)) {
		int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;

		if (inode_only == LOG_INODE_EXISTS)
			max_key_type = BTRFS_XATTR_ITEM_KEY;
		ret = drop_objectid_items(trans, log, path, ino, max_key_type);
	} else if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				      &BTRFS_I(inode)->runtime_flags)) {
		clear_bit(BTRFS_INODE_COPY_EVERYTHING,
			  &BTRFS_I(inode)->runtime_flags);
		ret = btrfs_truncate_inode_items(trans, log,
						 inode, 0, 0);
	} else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
				      &BTRFS_I(inode)->runtime_flags) ||
		   inode_only == LOG_INODE_EXISTS) {
		if (inode_only == LOG_INODE_ALL)
			fast_search = true;
		max_key.type = BTRFS_XATTR_ITEM_KEY;
		ret = drop_objectid_items(trans, log, path, ino,
					  max_key.type);
	} else {
		if (inode_only == LOG_INODE_ALL)
			fast_search = true;
		ret = log_inode_item(trans, log, dst_path, inode);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
		goto log_extents;
	}
	if (ret) {
		err = ret;
		goto out_unlock;
	}

	while (1) {
		ins_nr = 0;
		ret = btrfs_search_forward(root, &min_key,
					   path, trans->transid);
		if (ret != 0)
			break;
again:
		/* note, ins_nr might be > 0 here, cleanup outside the loop */
		if (min_key.objectid != ino)
			break;
		if (min_key.type > max_key.type)
			break;

		src = path->nodes[0];
		if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
			ins_nr++;
			goto next_slot;
		} else if (!ins_nr) {
			ins_start_slot = path->slots[0];
			ins_nr = 1;
			goto next_slot;
		}

		ret = copy_items(trans, inode, dst_path, path, &last_extent,
				 ins_start_slot, ins_nr, inode_only);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		}
		if (ret) {
			ins_nr = 0;
			btrfs_release_path(path);
			continue;
		}
		ins_nr = 1;
		ins_start_slot = path->slots[0];
next_slot:

		nritems = btrfs_header_nritems(path->nodes[0]);
		path->slots[0]++;
		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(path->nodes[0], &min_key,
					      path->slots[0]);
			goto again;
		}
		if (ins_nr) {
			ret = copy_items(trans, inode, dst_path, path,
					 &last_extent, ins_start_slot,
					 ins_nr, inode_only);
			if (ret < 0) {
				err = ret;
				goto out_unlock;
			}
			ret = 0;
			ins_nr = 0;
		}
		btrfs_release_path(path);

		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < max_key.type) {
			min_key.type++;
			min_key.offset = 0;
		} else {
			break;
		}
	}
	if (ins_nr) {
		ret = copy_items(trans, inode, dst_path, path, &last_extent,
				 ins_start_slot, ins_nr, inode_only);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		}
		ret = 0;
		ins_nr = 0;
	}

log_extents:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);
	if (fast_search) {
		ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
						&logged_list, ctx);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	} else if (inode_only == LOG_INODE_ALL) {
		struct extent_map *em, *n;

		write_lock(&em_tree->lock);
		/*
		 * We can't just remove every em if we're called for a ranged
		 * fsync - that is, one that doesn't cover the whole possible
		 * file range (0 to LLONG_MAX). This is because we can have
		 * em's that fall outside the range we're logging and therefore
		 * their ordered operations haven't completed yet
		 * (btrfs_finish_ordered_io() not invoked yet). This means we
		 * didn't get their respective file extent item in the fs/subvol
		 * tree yet, and need to let the next fast fsync (one which
		 * consults the list of modified extent maps) find the em so
		 * that it logs a matching file extent item and waits for the
		 * respective ordered operation to complete (if it's still
		 * running).
		 *
		 * Removing every em outside the range we're logging would make
		 * the next fast fsync not log their matching file extent items,
		 * therefore making us lose data after a log replay.
		 */
		list_for_each_entry_safe(em, n, &em_tree->modified_extents,
					 list) {
			const u64 mod_end = em->mod_start + em->mod_len - 1;

			if (em->mod_start >= start && mod_end <= end)
				list_del_init(&em->list);
		}
		write_unlock(&em_tree->lock);
	}

	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
		ret = log_directory_changes(trans, root, inode, path, dst_path);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	}

	write_lock(&em_tree->lock);
	/*
	 * If we're doing a ranged fsync and there are still modified extents
	 * in the list, we must run on the next fsync call as it might cover
	 * those extents (a full fsync or an fsync for other range).
	 */
	if (list_empty(&em_tree->modified_extents)) {
		BTRFS_I(inode)->logged_trans = trans->transid;
		BTRFS_I(inode)->last_log_commit =
			BTRFS_I(inode)->last_sub_trans;
	}
	write_unlock(&em_tree->lock);
out_unlock:
	if (err)
		btrfs_put_logged_extents(&logged_list);
	else
		btrfs_submit_logged_extents(&logged_list, log);
	mutex_unlock(&BTRFS_I(inode)->log_mutex);

	btrfs_free_path(path);
	btrfs_free_path(dst_path);
	return err;
}
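/*
 * Illustrative note (not in the original source): logged_trans and
 * last_log_commit are only advanced above when no modified extent maps
 * remain on the inode.  A ranged fsync that leaves extents on the list
 * therefore does not mark the inode as fully logged, so the next fsync
 * is forced to run through this function again instead of being short
 * circuited by the "inode already in log" check in the caller.
 */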
/*
 * follow the dentry parent pointers up the chain and see if any
 * of the directories in it require a full commit before they can
 * be logged.  Returns zero if nothing special needs to be done or 1 if
 * a full commit is required.
 */
static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
					       struct inode *inode,
					       struct dentry *parent,
					       struct super_block *sb,
					       u64 last_committed)
{
	int ret = 0;
	struct btrfs_root *root;
	struct dentry *old_parent = NULL;
	struct inode *orig_inode = inode;

	/*
	 * for regular files, if its inode is already on disk, we don't
	 * have to worry about the parents at all.  This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->i_mode) &&
	    BTRFS_I(inode)->generation <= last_committed &&
	    BTRFS_I(inode)->last_unlink_trans <= last_committed)
		goto out;

	if (!S_ISDIR(inode->i_mode)) {
		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			goto out;
		inode = parent->d_inode;
	}

	while (1) {
		/*
		 * If we are logging a directory then we start with our inode,
		 * not our parent's inode, so we need to skip setting the
		 * logged_trans so that further down in the log code we don't
		 * think this inode has already been logged.
		 */
		if (inode != orig_inode)
			BTRFS_I(inode)->logged_trans = trans->transid;
		smp_mb();

		if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
			root = BTRFS_I(inode)->root;

			/*
			 * make sure any commits to the log are forced
			 * to be full commits
			 */
			btrfs_set_log_full_commit(root->fs_info, trans);
			ret = 1;
			break;
		}

		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			break;

		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
		inode = parent->d_inode;

	}
	dput(old_parent);
out:
	return ret;
}
/*
 * helper function around btrfs_log_inode to make sure newly created
 * parent directories also end up in the log.  A minimal inode and backref
 * only logging is done of any parent directories that are older than
 * the last committed transaction
 */
static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, struct inode *inode,
				  struct dentry *parent,
				  const loff_t start,
				  const loff_t end,
				  int exists_only,
				  struct btrfs_log_ctx *ctx)
{
	int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
	struct super_block *sb;
	struct dentry *old_parent = NULL;
	int ret = 0;
	u64 last_committed = root->fs_info->last_trans_committed;

	sb = inode->i_sb;

	if (btrfs_test_opt(root, NOTREELOG)) {
		ret = 1;
		goto end_no_trans;
	}

	/*
	 * The previous transaction commit did not complete, so we have to do
	 * a full commit ourselves.
	 */
	if (root->fs_info->last_trans_log_full_commit >
	    root->fs_info->last_trans_committed) {
		ret = 1;
		goto end_no_trans;
	}

	if (root != BTRFS_I(inode)->root ||
	    btrfs_root_refs(&root->root_item) == 0) {
		ret = 1;
		goto end_no_trans;
	}

	ret = check_parent_dirs_for_sync(trans, inode, parent,
					 sb, last_committed);
	if (ret)
		goto end_no_trans;

	if (btrfs_inode_in_log(inode, trans->transid)) {
		ret = BTRFS_NO_LOG_SYNC;
		goto end_no_trans;
	}

	ret = start_log_trans(trans, root, ctx);
	if (ret)
		goto end_no_trans;

	ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
	if (ret)
		goto end_trans;

	/*
	 * for regular files, if its inode is already on disk, we don't
	 * have to worry about the parents at all.  This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->i_mode) &&
	    BTRFS_I(inode)->generation <= last_committed &&
	    BTRFS_I(inode)->last_unlink_trans <= last_committed) {
		ret = 0;
		goto end_trans;
	}

	inode_only = LOG_INODE_EXISTS;
	while (1) {
		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			break;

		inode = parent->d_inode;
		if (root != BTRFS_I(inode)->root)
			break;

		if (BTRFS_I(inode)->generation >
		    root->fs_info->last_trans_committed) {
			ret = btrfs_log_inode(trans, root, inode, inode_only,
					      0, LLONG_MAX, ctx);
			if (ret)
				goto end_trans;
		}
		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
	}
	ret = 0;
end_trans:
	dput(old_parent);
	if (ret < 0) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		ret = 1;
	}

	if (ret)
		btrfs_remove_log_ctx(root, ctx);
	btrfs_end_log_trans(root);
end_no_trans:
	return ret;
}
/*
 * it is not safe to log a dentry if the chunk root has added new
 * chunks.  This returns 0 if the dentry was logged, and 1 otherwise.
 * If this returns 1, you must commit the transaction to safely get your
 * data on disk.
 */
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct dentry *dentry,
			  const loff_t start,
			  const loff_t end,
			  struct btrfs_log_ctx *ctx)
{
	struct dentry *parent = dget_parent(dentry);
	int ret;

	ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent,
				     start, end, 0, ctx);
	dput(parent);

	return ret;
}
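/*
 * For illustration only (not part of the original source): the expected
 * fsync call chain is roughly
 *
 *	btrfs_sync_file()
 *	    -> btrfs_log_dentry_safe()
 *	        -> btrfs_log_inode_parent()
 *	            -> btrfs_log_inode()
 *	    -> btrfs_sync_log() on success, or
 *	       btrfs_commit_transaction() when a full commit is required.
 */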
/*
 * should be called during mount to recover and replay any log trees
 * from the FS
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key tmp_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = 0,
	};

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	fs_info->log_root_recovering = 1;

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error;
	}

	wc.trans = trans;
	wc.pin = 1;

	ret = walk_log_tree(trans, log_root_tree, &wc);
	if (ret) {
		btrfs_error(fs_info, ret, "Failed to pin buffers while "
			    "recovering log root tree.");
		goto error;
	}

again:
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);

		if (ret < 0) {
			btrfs_error(fs_info, ret,
				    "Couldn't find tree log root.");
			goto error;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;

		log = btrfs_read_fs_root(log_root_tree, &found_key);
		if (IS_ERR(log)) {
			ret = PTR_ERR(log);
			btrfs_error(fs_info, ret,
				    "Couldn't read tree log root.");
			goto error;
		}

		tmp_key.objectid = found_key.offset;
		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_key.offset = (u64)-1;

		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
		if (IS_ERR(wc.replay_dest)) {
			ret = PTR_ERR(wc.replay_dest);
			free_extent_buffer(log->node);
			free_extent_buffer(log->commit_root);
			kfree(log);
			btrfs_error(fs_info, ret, "Couldn't read target root "
				    "for tree log recovery.");
			goto error;
		}

		wc.replay_dest->log_root = log;
		btrfs_record_root_in_trans(trans, wc.replay_dest);
		ret = walk_log_tree(trans, log, &wc);

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
		}

		key.offset = found_key.offset - 1;
		wc.replay_dest->log_root = NULL;
		free_extent_buffer(log->node);
		free_extent_buffer(log->commit_root);
		kfree(log);

		if (ret)
			goto error;

		if (found_key.offset == 0)
			break;
	}
	btrfs_release_path(path);

	/* step one is to pin it all, step two is to replay just inodes */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}
	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	/* step 4: commit the transaction, which also unpins the blocks */
	ret = btrfs_commit_transaction(trans, fs_info->tree_root);
	if (ret)
		return ret;

	free_extent_buffer(log_root_tree->node);
	log_root_tree->log_root = NULL;
	fs_info->log_root_recovering = 0;
	kfree(log_root_tree);

	return 0;
error:
	if (wc.trans)
		btrfs_end_transaction(wc.trans, fs_info->tree_root);
	btrfs_free_path(path);
	return ret;
}
/*
 * there are some corner cases where we want to force a full
 * commit instead of allowing a directory to be logged.
 *
 * They revolve around files that were unlinked from the directory, and
 * this function updates the parent directory so that a full commit is
 * properly done if it is fsync'd later after the unlinks are done.
 */
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
			     struct inode *dir, struct inode *inode,
			     int for_rename)
{
	/*
	 * when we're logging a file, if it hasn't been renamed
	 * or unlinked, and its inode is fully committed on disk,
	 * we don't have to worry about walking up the directory chain
	 * to log its parents.
	 *
	 * So, we use the last_unlink_trans field to put this transid
	 * into the file.  When the file is logged we check it and
	 * don't log the parents if the file is fully on disk.
	 */
	if (S_ISREG(inode->i_mode))
		BTRFS_I(inode)->last_unlink_trans = trans->transid;

	/*
	 * if this directory was already logged any new
	 * names for this file/dir will get recorded
	 */
	smp_mb();
	if (BTRFS_I(dir)->logged_trans == trans->transid)
		return;

	/*
	 * if the inode we're about to unlink was logged,
	 * the log will be properly updated for any new names
	 */
	if (BTRFS_I(inode)->logged_trans == trans->transid)
		return;

	/*
	 * when renaming files across directories, if the directory
	 * we're unlinking from gets fsync'd later on, there's
	 * no way to find the destination directory later and fsync it
	 * properly.  So, we have to be conservative and force commits
	 * so the new name gets discovered.
	 */
	if (for_rename)
		goto record;

	/* we can safely do the unlink without any special recording */
	return;

record:
	BTRFS_I(dir)->last_unlink_trans = trans->transid;
}
/*
 * Call this after adding a new name for a file and it will properly
 * update the log to reflect the new name.
 *
 * It will return zero if all goes well, and it will return 1 if a
 * full transaction commit is required.
 */
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
			struct inode *inode, struct inode *old_dir,
			struct dentry *parent)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/*
	 * this will force the logging code to walk the dentry chain
	 * up to the root
	 */
	if (S_ISREG(inode->i_mode))
		BTRFS_I(inode)->last_unlink_trans = trans->transid;

	/*
	 * if this inode hasn't been logged and the directory we're renaming
	 * it from hasn't been logged, we don't need to log it
	 */
	if (BTRFS_I(inode)->logged_trans <=
	    root->fs_info->last_trans_committed &&
	    (!old_dir || BTRFS_I(old_dir)->logged_trans <=
		    root->fs_info->last_trans_committed))
		return 0;

	return btrfs_log_inode_parent(trans, root, inode, parent, 0,
				      LLONG_MAX, 1, NULL);
}