/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "hash.h"
/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  With the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */
/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY	0
#define LOG_WALK_REPLAY_INODES	1
#define LOG_WALK_REPLAY_DIR_INDEX	2
#define LOG_WALK_REPLAY_ALL	3
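/*
 * Illustrative sketch (not from the original file) of how log recovery
 * steps through the stages above: everything is pinned first with
 * process_one_buffer(), then the walk repeats with replay_one_buffer()
 * at each later stage.  Roughly:
 *
 *	struct walk_control wc = {
 *		.process_func = process_one_buffer,
 *		.stage = LOG_WALK_PIN_ONLY,
 *	};
 *	walk_log_tree(trans, log, &wc);
 *	wc.process_func = replay_one_buffer;
 *	for (wc.stage = LOG_WALK_REPLAY_INODES;
 *	     wc.stage <= LOG_WALK_REPLAY_ALL; wc.stage++)
 *		walk_log_tree(trans, log, &wc);
 */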
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct inode *inode,
			   int inode_only);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree
 * and once to do all the other items.
 */
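/*
 * Sketch of the fsync-side call sequence that drives the machinery
 * described above (illustrative only, not code from this file):
 *
 *	start_log_trans(trans, root, &ctx);
 *	btrfs_log_inode(trans, root, inode, LOG_INODE_ALL);
 *	btrfs_end_log_trans(root);
 *	ret = btrfs_sync_log(trans, root, &ctx);
 *
 * with a full transaction commit as the fallback whenever the log
 * cannot guarantee consistency by itself.
 */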
/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	int index;
	int ret;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		if (btrfs_need_log_full_commit(root->fs_info, trans)) {
			ret = -EAGAIN;
			goto out;
		}
		if (!root->log_start_pid) {
			root->log_start_pid = current->pid;
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}

		atomic_inc(&root->log_batch);
		atomic_inc(&root->log_writers);
		if (ctx) {
			index = root->log_transid % 2;
			list_add_tail(&ctx->list, &root->log_ctxs[index]);
			ctx->log_transid = root->log_transid;
		}
		mutex_unlock(&root->log_mutex);
		return 0;
	}

	ret = 0;
	mutex_lock(&root->fs_info->tree_log_mutex);
	if (!root->fs_info->log_root_tree)
		ret = btrfs_init_log_root_tree(trans, root->fs_info);
	mutex_unlock(&root->fs_info->tree_log_mutex);
	if (ret)
		goto out;

	if (!root->log_root) {
		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;
	}
	clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
	root->log_start_pid = current->pid;
	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	if (ctx) {
		index = root->log_transid % 2;
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}
out:
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there were no transactions
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	smp_mb();
	if (!root->log_root)
		return -ENOENT;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
int btrfs_pin_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		smp_mb();
		if (waitqueue_active(&root->log_writer_wait))
			wake_up(&root->log_writer_wait);
	}
}
/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen);
};
/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen)
{
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen);
		if (ret)
			return ret;
	}

	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
						      eb->start, eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(log, eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}
/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
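/*
 * Worked example (illustrative sizes only): replaying a 160 byte inode
 * item whose key already exists as a 160 byte item with identical bytes
 * is a no-op; if the destination item were only 96 bytes it would first
 * be extended by 64 bytes, and only then overwritten with the raw copy.
 */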
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(root, path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(root, path,
					  item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0)
			goto no_copy;

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}
/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode)) {
		inode = NULL;
	} else if (is_bad_inode(inode)) {
		iput(inode);
		inode = NULL;
	}
	return inode;
}
/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on return.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_inline_len(eb, slot, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size, root->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
				       start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		if (ins.objectid > 0) {
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);
			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_extent(root, ins.objectid,
						  ins.offset);
			if (ret == 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						ins.objectid, ins.offset,
						0, root->root_key.objectid,
						key->objectid, offset, 0);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root, root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
						root->fs_info->csum_root,
						sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	inode_add_bytes(inode, nbytes);
	ret = btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}
/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * entry
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
	if (ret)
		goto out;
	else
		ret = btrfs_run_delayed_items(trans, root);
out:
	kfree(name);
	iput(inode);
	return ret;
}
/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	match = 1;
out:
	btrfs_release_path(path);
	return match;
}
/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   char *name, int namelen)
{
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long ptr_end;
	unsigned long name_ptr;
	int found_name_len;
	int item_size;
	int ret;
	int match = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret != 0)
		goto out;

	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		if (btrfs_find_name_in_ext_backref(path, ref_objectid,
						   name, namelen, NULL))
			match = 1;

		goto out;
	}

	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
						   name_ptr, namelen);
			if (ret == 0) {
				match = 1;
				goto out;
			}
		}
		ptr = (unsigned long)(ref + 1) + found_name_len;
	}
out:
	btrfs_free_path(path);
	return match;
}
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct inode *dir, struct inode *inode,
				  struct extent_buffer *eb,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,
					    victim_name_len)) {
				inc_nlink(inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir,
							 inode, victim_name,
							 victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans, root);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = 0;
			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,
					    victim_name_len)) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
							       parent_objectid);
				if (victim_parent) {
					inc_nlink(inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
								 victim_parent,
								 inode,
								 victim_name,
								 victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								  trans, root);
				}
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
			if (ret)
				return ret;
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}
static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}
static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     &ref_index);
		}
		if (ret)
			goto out;

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
				  ref_index, name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */

			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      dir, inode, eb,
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/* insert our name */
			ret = btrfs_add_link(trans, dir, inode, name, namelen,
					     0, ref_index);
			if (ret)
				goto out;

			btrfs_update_inode(trans, root, inode);
		}

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}
static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 offset)
{
	int ret;

	ret = btrfs_find_item(root, NULL, BTRFS_ORPHAN_OBJECTID,
			      offset, BTRFS_ORPHAN_ITEM_KEY, NULL);
	if (ret > 0)
		ret = btrfs_insert_orphan_item(trans, root, offset);
	return ret;
}
static int count_inode_extrefs(struct btrfs_root *root,
			       struct inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}
static int count_inode_refs(struct btrfs_root *root,
			    struct inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}
/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, inode, path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, inode, path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = insert_orphan_item(trans, root, ino);
	}

out:
	btrfs_free_path(path);
	return ret;
}
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode)
			return -EIO;

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			goto out;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		else
			inc_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
		ret = 0;
	} else {
		BUG(); /* Logic Error */
	}
	iput(inode);

	return ret;
}
/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    u64 dirid, u64 index,
				    char *name, int name_len, u8 type,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}

	ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}
/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret = 0;
	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
			   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	if (exists == 0)
		exists = 1;
	else
		exists = 0;
	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
					       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		/* Corruption */
		ret = -EINVAL;
		goto out;
	}
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		update_size = false;
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, dir, dst_di);
	if (ret)
		goto out;

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(path);
	if (!ret && update_size) {
		btrfs_i_size_write(dir, dir->i_size + name_len * 2);
		ret = btrfs_update_inode(trans, root, dir);
	}
	kfree(name);
	iput(dir);
	return ret;

insert:
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, path, key->objectid, key->offset,
			      name, name_len, log_type, &log_key);
	if (ret && ret != -ENOENT)
		goto out;
	update_size = false;
	ret = 0;
	goto out;
}
/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	int ret = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di))
			return -EIO;
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		if (ret)
			return ret;
		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	return 0;
}
/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
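/*
 * Example (illustrative numbers, not from the original source): a
 * dir_log item with key.offset == 0 and a dir_log_end of 100 says the
 * log is authoritative for index keys 0..100 of this directory; an
 * index key 42 found in the subvolume but missing from the log must
 * therefore be a deletion and is removed during replay.
 */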
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}
	if (ret != 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	} else {
		path->slots[0]++;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
{
	int ret;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	char *name;
	struct inode *inode;
	struct btrfs_key location;

again:
	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di)) {
			ret = -EIO;
			goto out;
		}

		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		read_extent_buffer(eb, name, (unsigned long)(di + 1),
				   name_len);
		log_di = NULL;
		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
						       dir_key->objectid,
						       name, name_len, 0);
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
						     log_path,
						     dir_key->objectid,
						     dir_key->offset,
						     name, name_len, 0);
		}
		if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(path);
			btrfs_release_path(log_path);
			inode = read_one_inode(root, location.objectid);
			if (!inode) {
				kfree(name);
				return -EIO;
			}

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);
			if (ret) {
				kfree(name);
				iput(inode);
				goto out;
			}

			inc_nlink(inode);
			ret = btrfs_unlink_inode(trans, root, dir, inode,
						 name, name_len);
			if (!ret)
				ret = btrfs_run_delayed_items(trans, root);
			kfree(name);
			iput(inode);
			if (ret)
				goto out;

			/* there might still be more names under this key
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
						0, 0);
			if (ret == 0)
				goto again;
			ret = 0;
			goto out;
		} else if (IS_ERR(log_di)) {
			kfree(name);
			return PTR_ERR(log_di);
		}
		btrfs_release_path(log_path);
		kfree(name);

		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	btrfs_release_path(log_path);
	return ret;
}
/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
{
	u64 range_start;
	u64 range_end;
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	int ret = 0;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;
	struct inode *dir;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 * from the log
	 */
	if (!dir) {
		btrfs_free_path(log_path);
		return 0;
	}
again:
	range_start = 0;
	range_end = 0;
	while (1) {
		if (del_all)
			range_end = (u64)-1;
		else {
			ret = find_dir_range(log, path, dirid, key_type,
					     &range_start, &range_end);
			if (ret != 0)
				break;
		}

		dir_key.offset = range_start;
		while (1) {
			int nritems;
			ret = btrfs_search_slot(NULL, root, &dir_key, path,
						0, 0);
			if (ret < 0)
				goto out;

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)
				goto next_type;

			if (found_key.offset > range_end)
				break;

			ret = check_item_in_log(trans, root, log, path,
						log_path, dir,
						&found_key);
			if (ret)
				goto out;
			if (found_key.offset == (u64)-1)
				break;
			dir_key.offset = found_key.offset + 1;
		}
		btrfs_release_path(path);
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}

next_type:
	ret = 0;
	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(path);
		goto again;
	}
out:
	btrfs_release_path(path);
	btrfs_free_path(log_path);
	iput(dir);
	return ret;
}
/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen)
{
	int nritems;
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;
	int level;
	int i;
	int ret;

	ret = btrfs_read_buffer(eb, gen);
	if (ret)
		return ret;

	level = btrfs_header_level(eb);

	if (level != 0)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;
			u32 mode;

			inode_item = btrfs_item_ptr(eb, i,
					    struct btrfs_inode_item);
			mode = btrfs_inode_mode(eb, inode_item);
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid, 0);
				if (ret)
					break;
			}
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;

			/* for regular files, make sure corresponding
			 * orphan item exist. extents past the new EOF
			 * will be truncated later by orphan cleanup.
			 */
			if (S_ISREG(mode)) {
				ret = insert_orphan_item(wc->trans, root,
							 key.objectid);
				if (ret)
					break;
			}

			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);
			if (ret)
				break;
		}

		if (key.type == BTRFS_DIR_INDEX_KEY &&
		    wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}

		if (wc->stage < LOG_WALK_REPLAY_ALL)
			continue;

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_INODE_REF_KEY ||
			   key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			if (ret && ret != -ENOENT)
				break;
			ret = 0;
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
						eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_DIR_ITEM_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}
	}
	btrfs_free_path(path);
	return ret;
}
static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_path *path, int *level,
				       struct walk_control *wc)
{
	u64 root_owner;
	u64 bytenr;
	u64 ptr_gen;
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;
	u32 blocksize;
	int ret = 0;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	while (*level > 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		WARN_ON(btrfs_header_level(cur) != *level);

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = btrfs_level_size(root, *level - 1);

		parent = path->nodes[*level];
		root_owner = btrfs_header_owner(parent);

		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
		if (!next)
			return -ENOMEM;

		if (*level == 1) {
			ret = wc->process_func(root, next, wc, ptr_gen);
			if (ret) {
				free_extent_buffer(next);
				return ret;
			}

			path->slots[*level]++;
			if (wc->free) {
				ret = btrfs_read_buffer(next, ptr_gen);
				if (ret) {
					free_extent_buffer(next);
					return ret;
				}

				if (trans) {
					btrfs_tree_lock(next);
					btrfs_set_lock_blocking(next);
					clean_tree_block(trans, root, next);
					btrfs_wait_tree_block_writeback(next);
					btrfs_tree_unlock(next);
				}

				WARN_ON(root_owner !=
					BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(root,
							 bytenr, blocksize);
				if (ret) {
					free_extent_buffer(next);
					return ret;
				}
			}
			free_extent_buffer(next);
			continue;
		}
		ret = btrfs_read_buffer(next, ptr_gen);
		if (ret) {
			free_extent_buffer(next);
			return ret;
		}

		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
		cond_resched();
	}
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);

	cond_resched();
	return 0;
}
static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int *level,
				     struct walk_control *wc)
{
	u64 root_owner;
	int i;
	int slot;
	int ret;

	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
			path->slots[i]++;
			*level = i;
			WARN_ON(*level == 0);
			return 0;
		} else {
			struct extent_buffer *parent;
			if (path->nodes[*level] == root->node)
				parent = path->nodes[*level];
			else
				parent = path->nodes[*level + 1];

			root_owner = btrfs_header_owner(parent);
			ret = wc->process_func(root, path->nodes[*level], wc,
				 btrfs_header_generation(path->nodes[*level]));
			if (ret)
				return ret;

			if (wc->free) {
				struct extent_buffer *next;

				next = path->nodes[*level];

				if (trans) {
					btrfs_tree_lock(next);
					btrfs_set_lock_blocking(next);
					clean_tree_block(trans, root, next);
					btrfs_wait_tree_block_writeback(next);
					btrfs_tree_unlock(next);
				}

				WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(root,
						path->nodes[*level]->start,
						path->nodes[*level]->len);
				if (ret)
					return ret;
			}
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
		}
	}
	return 1;
}
/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
static int walk_log_tree(struct btrfs_trans_handle *trans,
			 struct btrfs_root *log, struct walk_control *wc)
{
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int orig_level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	level = btrfs_header_level(log->node);
	orig_level = level;
	path->nodes[level] = log->node;
	extent_buffer_get(log->node);
	path->slots[level] = 0;

	while (1) {
		wret = walk_down_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}

		wret = walk_up_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}
	}

	/* was the root node processed? if not, catch it here */
	if (path->nodes[orig_level]) {
		ret = wc->process_func(log, path->nodes[orig_level], wc,
			 btrfs_header_generation(path->nodes[orig_level]));
		if (ret)
			goto out;
		if (wc->free) {
			struct extent_buffer *next;

			next = path->nodes[orig_level];

			if (trans) {
				btrfs_tree_lock(next);
				btrfs_set_lock_blocking(next);
				clean_tree_block(trans, log, next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);
			}

			WARN_ON(log->root_key.objectid !=
				BTRFS_TREE_LOG_OBJECTID);
			ret = btrfs_free_and_pin_reserved_extent(log, next->start,
								 next->len);
			if (ret)
				goto out;
		}
	}

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * helper function to update the item for a given subvolume's log root
 * in the tree of log roots
 */
static int update_log_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *log)
{
	int ret;

	if (log->log_transid == 1) {
		/* insert root item on the first sync */
		ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
					&log->root_key, &log->root_item);
	} else {
		ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
					&log->root_key, &log->root_item);
	}
	return ret;
}
static void wait_log_commit(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, int transid)
{
	DEFINE_WAIT(wait);
	int index = transid % 2;

	/*
	 * we only allow two pending log transactions at a time,
	 * so we know that if ours is more than 2 older than the
	 * current transaction, we're done
	 */
	do {
		prepare_to_wait(&root->log_commit_wait[index],
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);

		if (root->log_transid_committed < transid &&
		    atomic_read(&root->log_commit[index]))
			schedule();

		finish_wait(&root->log_commit_wait[index], &wait);
		mutex_lock(&root->log_mutex);
	} while (root->log_transid_committed < transid &&
		 atomic_read(&root->log_commit[index]));
}
static void wait_for_writer(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	DEFINE_WAIT(wait);

	while (atomic_read(&root->log_writers)) {
		prepare_to_wait(&root->log_writer_wait,
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);
		if (atomic_read(&root->log_writers))
			schedule();
		mutex_lock(&root->log_mutex);
		finish_wait(&root->log_writer_wait, &wait);
	}
}
static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
					struct btrfs_log_ctx *ctx)
{
	if (!ctx)
		return;

	mutex_lock(&root->log_mutex);
	list_del_init(&ctx->list);
	mutex_unlock(&root->log_mutex);
}
/*
 * Invoked in log mutex context, or be sure there is no other task which
 * can access the list.
 */
static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
					     int index, int error)
{
	struct btrfs_log_ctx *ctx;

	if (!error) {
		INIT_LIST_HEAD(&root->log_ctxs[index]);
		return;
	}

	list_for_each_entry(ctx, &root->log_ctxs[index], list)
		ctx->log_ret = error;

	INIT_LIST_HEAD(&root->log_ctxs[index]);
}
/*
 * btrfs_sync_log sends a given tree log down to the disk and
 * updates the super blocks to record it.  When this call is done,
 * you know that any inodes previously logged are safely on disk only
 * if it returns 0.
 *
 * Any other return value means you need to call btrfs_commit_transaction.
 * Some of the edge cases for fsyncing directories that have had unlinks
 * or renames done in the past mean that sometimes the only safe
 * fsync is to commit the whole FS.  When btrfs_sync_log returns -EAGAIN,
 * that has happened.
 */
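/*
 * Sketch of the caller contract (illustrative, not code from this
 * file): the fsync path treats any nonzero return as "fall back to a
 * full transaction commit":
 *
 *	ret = btrfs_sync_log(trans, root, &ctx);
 *	if (ret)
 *		ret = btrfs_commit_transaction(trans, root);
 *	else
 *		ret = btrfs_end_transaction(trans, root);
 */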
2463 int btrfs_sync_log(struct btrfs_trans_handle
*trans
,
2464 struct btrfs_root
*root
, struct btrfs_log_ctx
*ctx
)
2470 struct btrfs_root
*log
= root
->log_root
;
2471 struct btrfs_root
*log_root_tree
= root
->fs_info
->log_root_tree
;
2472 int log_transid
= 0;
2473 struct btrfs_log_ctx root_log_ctx
;
2474 struct blk_plug plug
;
2476 mutex_lock(&root
->log_mutex
);
2477 log_transid
= ctx
->log_transid
;
2478 if (root
->log_transid_committed
>= log_transid
) {
2479 mutex_unlock(&root
->log_mutex
);
2480 return ctx
->log_ret
;
2483 index1
= log_transid
% 2;
2484 if (atomic_read(&root
->log_commit
[index1
])) {
2485 wait_log_commit(trans
, root
, log_transid
);
2486 mutex_unlock(&root
->log_mutex
);
2487 return ctx
->log_ret
;
2489 ASSERT(log_transid
== root
->log_transid
);
2490 atomic_set(&root
->log_commit
[index1
], 1);
2492 /* wait for previous tree log sync to complete */
2493 if (atomic_read(&root
->log_commit
[(index1
+ 1) % 2]))
2494 wait_log_commit(trans
, root
, log_transid
- 1);
	while (1) {
		int batch = atomic_read(&root->log_batch);
		/* when we're on an ssd, just kick the log commit out */
		if (!btrfs_test_opt(root, SSD) &&
		    test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
			mutex_unlock(&root->log_mutex);
			schedule_timeout_uninterruptible(1);
			mutex_lock(&root->log_mutex);
		}
		wait_for_writer(trans, root);
		if (batch == atomic_read(&root->log_batch))
			break;
	}

	/* bail out if we need to do a full commit */
	if (btrfs_need_log_full_commit(root->fs_info, trans)) {
		ret = -EAGAIN;
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

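	/*
	 * Two log transactions can be in flight at once, so even and odd
	 * log transids mark their dirty pages with different bits
	 * (EXTENT_DIRTY vs EXTENT_NEW) in dirty_log_pages; each commit
	 * then writes and waits on only its own pages.
	 */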
	if (log_transid % 2 == 0)
		mark = EXTENT_DIRTY;
	else
		mark = EXTENT_NEW;

	/* we start IO on all the marked extents here, but we don't actually
	 * wait for them until later.
	 */
	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
	if (ret) {
		blk_finish_plug(&plug);
		btrfs_abort_transaction(trans, root, ret);
		btrfs_free_logged_extents(log, log_transid);
		btrfs_set_log_full_commit(root->fs_info, trans);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	btrfs_set_root_node(&log->root_item, log->node);

	root->log_transid++;
	log->log_transid = root->log_transid;
	root->log_start_pid = 0;
	/*
	 * IO has been started, blocks of the log tree have WRITTEN flag set
	 * in their headers. new modifications of the log will be written to
	 * new positions. so it's safe to allow log writers to go in.
	 */
	mutex_unlock(&root->log_mutex);

	btrfs_init_log_ctx(&root_log_ctx);

	mutex_lock(&log_root_tree->log_mutex);
	atomic_inc(&log_root_tree->log_batch);
	atomic_inc(&log_root_tree->log_writers);

	index2 = log_root_tree->log_transid % 2;
	list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
	root_log_ctx.log_transid = log_root_tree->log_transid;

	mutex_unlock(&log_root_tree->log_mutex);

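	/*
	 * From here on we act as a writer against the log root tree:
	 * either the log-root-tree commit below includes our updated log
	 * root, or another task commits it first and root_log_ctx.log_ret
	 * hands that task's result back to us.
	 */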
	ret = update_log_root(trans, log);

	mutex_lock(&log_root_tree->log_mutex);
	if (atomic_dec_and_test(&log_root_tree->log_writers)) {
		smp_mb();
		if (waitqueue_active(&log_root_tree->log_writer_wait))
			wake_up(&log_root_tree->log_writer_wait);
	}

	if (ret) {
		if (!list_empty(&root_log_ctx.list))
			list_del_init(&root_log_ctx.list);

		blk_finish_plug(&plug);
		btrfs_set_log_full_commit(root->fs_info, trans);

		if (ret != -ENOSPC) {
			btrfs_abort_transaction(trans, root, ret);
			mutex_unlock(&log_root_tree->log_mutex);
			goto out;
		}
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out;
	}

	if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
		mutex_unlock(&log_root_tree->log_mutex);
		ret = root_log_ctx.log_ret;
		goto out;
	}

	index2 = root_log_ctx.log_transid % 2;
	if (atomic_read(&log_root_tree->log_commit[index2])) {
		blk_finish_plug(&plug);
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		wait_log_commit(trans, log_root_tree,
				root_log_ctx.log_transid);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = root_log_ctx.log_ret;
		goto out;
	}
	ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
	atomic_set(&log_root_tree->log_commit[index2], 1);

	if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
		wait_log_commit(trans, log_root_tree,
				root_log_ctx.log_transid - 1);
	}

	wait_for_writer(trans, log_root_tree);

	/*
	 * now that we've moved on to the tree of log tree roots,
	 * check the full commit flag again
	 */
	if (btrfs_need_log_full_commit(root->fs_info, trans)) {
		blk_finish_plug(&plug);
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out_wake_log_root;
	}

	ret = btrfs_write_marked_extents(log_root_tree,
					 &log_root_tree->dirty_log_pages,
					 EXTENT_DIRTY | EXTENT_NEW);
	blk_finish_plug(&plug);
	if (ret) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		btrfs_abort_transaction(trans, root, ret);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}
	btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
	btrfs_wait_marked_extents(log_root_tree,
				  &log_root_tree->dirty_log_pages,
				  EXTENT_NEW | EXTENT_DIRTY);
	btrfs_wait_logged_extents(log, log_transid);

	btrfs_set_super_log_root(root->fs_info->super_for_commit,
				 log_root_tree->node->start);
	btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
				       btrfs_header_level(log_root_tree->node));

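	/*
	 * super_for_commit is the superblock copy that write_ctree_super()
	 * pushes to disk below; recording the new log root in it is what
	 * lets mount-time recovery (btrfs_recover_log_trees) find this log.
	 */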
	log_root_tree->log_transid++;
	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * nobody else is going to jump in and write the ctree
	 * super here because the log_commit atomic below is protecting
	 * us.  We must be called with a transaction handle pinning
	 * the running transaction open, so a full commit can't hop
	 * in and cause problems either.
	 */
	ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
	if (ret) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		btrfs_abort_transaction(trans, root, ret);
		goto out_wake_log_root;
	}

	mutex_lock(&root->log_mutex);
	if (root->last_log_commit < log_transid)
		root->last_log_commit = log_transid;
	mutex_unlock(&root->log_mutex);

out_wake_log_root:
	/*
	 * We needn't take the log_mutex here because we are sure all
	 * the other tasks are blocked.
	 */
	btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);

	mutex_lock(&log_root_tree->log_mutex);
	log_root_tree->log_transid_committed++;
	atomic_set(&log_root_tree->log_commit[index2], 0);
	mutex_unlock(&log_root_tree->log_mutex);

	if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
		wake_up(&log_root_tree->log_commit_wait[index2]);
out:
	btrfs_remove_all_log_ctxs(root, index1, ret);

	mutex_lock(&root->log_mutex);
	root->log_transid_committed++;
	atomic_set(&root->log_commit[index1], 0);
	mutex_unlock(&root->log_mutex);

	if (waitqueue_active(&root->log_commit_wait[index1]))
		wake_up(&root->log_commit_wait[index1]);
	return ret;
}

static void free_log_tree(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log)
{
	int ret;
	u64 start;
	u64 end;
	struct walk_control wc = {
		.free = 1,
		.process_func = process_one_buffer
	};

	ret = walk_log_tree(trans, log, &wc);
	/* I don't think this can happen but just in case */
	if (ret)
		btrfs_abort_transaction(trans, log, ret);

	while (1) {
		ret = find_first_extent_bit(&log->dirty_log_pages,
				0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
				NULL);
		if (ret)
			break;

		clear_extent_bits(&log->dirty_log_pages, start, end,
				  EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
	}

	/*
	 * We may have short-circuited the log tree with the full commit logic
	 * and left ordered extents on our list, so clear these out to keep us
	 * from leaking inodes and memory.
	 */
	btrfs_free_logged_extents(log, 0);
	btrfs_free_logged_extents(log, 1);

	free_extent_buffer(log->node);
	kfree(log);
}

/*
 * free all the extents used by the tree log.  This should be called
 * at commit time of the full transaction
 */
int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
{
	if (root->log_root) {
		free_log_tree(trans, root->log_root);
		root->log_root = NULL;
	}
	return 0;
}

int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	if (fs_info->log_root_tree) {
		free_log_tree(trans, fs_info->log_root_tree);
		fs_info->log_root_tree = NULL;
	}
	return 0;
}

/*
 * If both a file and directory are logged, and unlinks or renames are
 * mixed in, we have a few interesting corners:
 *
 * create file X in dir Y
 * link file X to X.link in dir Y
 * fsync file X
 * unlink file X but leave X.link
 * fsync dir Y
 *
 * After a crash we would expect only X.link to exist.  But file X
 * didn't get fsync'd again so the log has back refs for X and X.link.
 *
 * We solve this by removing directory entries and inode backrefs from the
 * log when a file that was logged in the current transaction is
 * unlinked.  Any later fsync will include the updated log entries, and
 * we'll be able to reconstruct the proper directory items from backrefs.
 *
 * This optimization allows us to avoid relogging the entire inode
 * or the entire directory.
 */
int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 const char *name, int name_len,
				 struct inode *dir, u64 index)
{
	struct btrfs_root *log;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	int ret;
	int err = 0;
	int bytes_del = 0;
	u64 dir_ino = btrfs_ino(dir);

	if (BTRFS_I(dir)->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;

	mutex_lock(&BTRFS_I(dir)->log_mutex);

	log = root->log_root;
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_unlock;
	}

	di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		if (ret) {
			err = ret;
			goto fail;
		}
	}
	btrfs_release_path(path);
	di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
					 index, name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		if (ret) {
			err = ret;
			goto fail;
		}
	}

	/* update the directory size in the log to reflect the names
	 * we have removed
	 */
	if (bytes_del) {
		struct btrfs_key key;

		key.objectid = dir_ino;
		key.offset = 0;
		key.type = BTRFS_INODE_ITEM_KEY;
		btrfs_release_path(path);

		ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (ret == 0) {
			struct btrfs_inode_item *item;
			u64 i_size;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			i_size = btrfs_inode_size(path->nodes[0], item);
			if (i_size > bytes_del)
				i_size -= bytes_del;
			else
				i_size = 0;
			btrfs_set_inode_size(path->nodes[0], item, i_size);
			btrfs_mark_buffer_dirty(path->nodes[0]);
		} else
			ret = 0;
		btrfs_release_path(path);
	}
fail:
	btrfs_free_path(path);
out_unlock:
	mutex_unlock(&BTRFS_I(dir)->log_mutex);
	if (ret == -ENOSPC) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		ret = 0;
	} else if (ret < 0)
		btrfs_abort_transaction(trans, root, ret);

	btrfs_end_log_trans(root);

	return err;
}

/* see comments for btrfs_del_dir_entries_in_log */
int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       const char *name, int name_len,
			       struct inode *inode, u64 dirid)
{
	struct btrfs_root *log;
	u64 index;
	int ret;

	if (BTRFS_I(inode)->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;
	log = root->log_root;
	mutex_lock(&BTRFS_I(inode)->log_mutex);

	ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
				  dirid, &index);
	mutex_unlock(&BTRFS_I(inode)->log_mutex);
	if (ret == -ENOSPC) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		ret = 0;
	} else if (ret < 0 && ret != -ENOENT)
		btrfs_abort_transaction(trans, root, ret);
	btrfs_end_log_trans(root);

	return ret;
}

/*
 * creates a range item in the log for 'dirid'.  first_offset and
 * last_offset tell us which parts of the key space the log should
 * be considered authoritative for.
 */
static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       int key_type, u64 dirid,
				       u64 first_offset, u64 last_offset)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_dir_log_item *item;

	key.objectid = dirid;
	key.offset = first_offset;
	if (key_type == BTRFS_DIR_ITEM_KEY)
		key.type = BTRFS_DIR_LOG_ITEM_KEY;
	else
		key.type = BTRFS_DIR_LOG_INDEX_KEY;
	ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
	if (ret)
		return ret;

	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}

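/*
 * During replay, replay_dir_deletes() walks these range items: any
 * directory entry in the subvolume whose offset falls inside a logged
 * [first_offset, last_offset] range but is missing from the log tree
 * must have been unlinked during the transaction, so it gets removed.
 */
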
/*
 * log all the items included in the current transaction for a given
 * directory.  This also creates the range items in the log tree required
 * to replay anything deleted before the fsync
 */
static noinline int log_dir_items(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path, int key_type,
			  u64 min_offset, u64 *last_offset_ret)
{
	struct btrfs_key min_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src;
	int err = 0;
	int ret;
	int i;
	int nritems;
	u64 first_offset = min_offset;
	u64 last_offset = (u64)-1;
	u64 ino = btrfs_ino(inode);

	log = root->log_root;

	min_key.objectid = ino;
	min_key.type = key_type;
	min_key.offset = min_offset;

	path->keep_locks = 1;

	ret = btrfs_search_forward(root, &min_key, path, trans->transid);

	/*
	 * we didn't find anything from this transaction, see if there
	 * is anything at all
	 */
	if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
		min_key.objectid = ino;
		min_key.type = key_type;
		min_key.offset = (u64)-1;
		btrfs_release_path(path);
		ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
		if (ret < 0) {
			btrfs_release_path(path);
			return ret;
		}
		ret = btrfs_previous_item(root, path, ino, key_type);

		/* if ret == 0 there are items for this type,
		 * create a range to tell us the last key of this type.
		 * otherwise, there are no items in this directory after
		 * *min_offset, and we create a range to indicate that.
		 */
		if (ret == 0) {
			struct btrfs_key tmp;
			btrfs_item_key_to_cpu(path->nodes[0], &tmp,
					      path->slots[0]);
			if (key_type == tmp.type)
				first_offset = max(min_offset, tmp.offset) + 1;
		}
		goto done;
	}

	/* go backward to find any previous key */
	ret = btrfs_previous_item(root, path, ino, key_type);
	if (ret == 0) {
		struct btrfs_key tmp;
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (key_type == tmp.type) {
			first_offset = tmp.offset;
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret) {
				err = ret;
				goto done;
			}
		}
	}
	btrfs_release_path(path);

	/* find the first key from this transaction again */
	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
	if (WARN_ON(ret != 0))
		goto done;

	/*
	 * we have a block from this transaction, log every item in it
	 * from our directory
	 */
	while (1) {
		struct btrfs_key tmp;
		src = path->nodes[0];
		nritems = btrfs_header_nritems(src);
		for (i = path->slots[0]; i < nritems; i++) {
			btrfs_item_key_to_cpu(src, &min_key, i);

			if (min_key.objectid != ino || min_key.type != key_type)
				goto done;
			ret = overwrite_item(trans, log, dst_path, src, i,
					     &min_key);
			if (ret) {
				err = ret;
				goto done;
			}
		}
		path->slots[0] = nritems;

		/*
		 * look ahead to the next item and see if it is also
		 * from this directory and from this transaction
		 */
		ret = btrfs_next_leaf(root, path);
		if (ret == 1) {
			last_offset = (u64)-1;
			goto done;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (tmp.objectid != ino || tmp.type != key_type) {
			last_offset = (u64)-1;
			goto done;
		}
		if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret)
				err = ret;
			else
				last_offset = tmp.offset;
			goto done;
		}
	}
done:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);

	if (err == 0) {
		*last_offset_ret = last_offset;
		/*
		 * insert the log range keys to indicate where the log
		 * is valid
		 */
		ret = insert_dir_log_key(trans, log, path, key_type,
					 ino, first_offset, last_offset);
		if (ret)
			err = ret;
	}
	return err;
}

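/*
 * log_dir_items is driven by log_directory_changes below: once over the
 * BTRFS_DIR_ITEM_KEY space and once over BTRFS_DIR_INDEX_KEY, advancing
 * min_offset until the returned last offset is (u64)-1.
 */
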
/*
 * logging directories is very similar to logging inodes.  We find all
 * the items from the current transaction and write them to the log.
 *
 * The recovery code scans the directory in the subvolume, and if it finds a
 * key in the range logged that is not present in the log tree, then it means
 * that dir entry was unlinked during the transaction.
 *
 * In order for that scan to work, we must include one key smaller than
 * the smallest logged by this transaction and one key larger than the largest
 * key logged by this transaction.
 */
static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path)
{
	u64 min_key;
	u64 max_key;
	int ret;
	int key_type = BTRFS_DIR_ITEM_KEY;

again:
	min_key = 0;
	max_key = 0;
	while (1) {
		ret = log_dir_items(trans, root, inode, path,
				    dst_path, key_type, min_key,
				    &max_key);
		if (ret)
			return ret;
		if (max_key == (u64)-1)
			break;
		min_key = max_key + 1;
	}

	if (key_type == BTRFS_DIR_ITEM_KEY) {
		key_type = BTRFS_DIR_INDEX_KEY;
		goto again;
	}
	return 0;
}

/*
 * a helper function to drop items from the log before we relog an
 * inode.  max_key_type indicates the highest item type to remove.
 * This cannot be run for file data extents because it does not
 * free the extents they point to.
 */
static int drop_objectid_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log,
			       struct btrfs_path *path,
			       u64 objectid, int max_key_type)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int start_slot;

	key.objectid = objectid;
	key.type = max_key_type;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
		BUG_ON(ret == 0); /* Logic error */
		if (ret < 0)
			break;

		if (path->slots[0] == 0)
			break;

		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);

		if (found_key.objectid != objectid)
			break;

		found_key.offset = 0;
		found_key.type = 0;
		ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
				       &start_slot);

		ret = btrfs_del_items(trans, log, path, start_slot,
				      path->slots[0] - start_slot + 1);
		/*
		 * If start slot isn't 0 then we don't need to re-search, we've
		 * found the last guy with the objectid in this tree.
		 */
		if (ret || start_slot != 0)
			break;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);
	if (ret > 0)
		ret = 0;
	return ret;
}

static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode, int log_inode_only)
{
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	if (log_inode_only) {
		/* set the generation to zero so the recover code
		 * can tell the difference between a logging
		 * just to say 'this inode exists' and a logging
		 * to say 'update this inode with these values'
		 */
		btrfs_set_token_inode_generation(leaf, item, 0, &token);
		btrfs_set_token_inode_size(leaf, item, 0, &token);
	} else {
		btrfs_set_token_inode_generation(leaf, item,
						 BTRFS_I(inode)->generation,
						 &token);
		btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
	}

	btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
	btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
	btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
	btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);

	btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
				     inode->i_atime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
				      inode->i_atime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
				     inode->i_mtime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
				      inode->i_mtime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
				     inode->i_ctime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
				      inode->i_ctime.tv_nsec, &token);

	btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
				     &token);

	btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
	btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
	btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
	btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
	btrfs_set_token_inode_block_group(leaf, item, 0, &token);
}

static int log_inode_item(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log, struct btrfs_path *path,
			  struct inode *inode)
{
	struct btrfs_inode_item *inode_item;
	int ret;

	ret = btrfs_insert_empty_item(trans, log, path,
				      &BTRFS_I(inode)->location,
				      sizeof(*inode_item));
	if (ret && ret != -EEXIST)
		return ret;
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	fill_inode_item(trans, path->nodes[0], inode_item, inode, 0);
	btrfs_release_path(path);
	return 0;
}

static noinline int copy_items(struct btrfs_trans_handle *trans,
			       struct inode *inode,
			       struct btrfs_path *dst_path,
			       struct btrfs_path *src_path, u64 *last_extent,
			       int start_slot, int nr, int inode_only)
{
	unsigned long src_offset;
	unsigned long dst_offset;
	struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
	struct btrfs_file_extent_item *extent;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *src = src_path->nodes[0];
	struct btrfs_key first_key, last_key, key;
	int ret;
	struct btrfs_key *ins_keys;
	u32 *ins_sizes;
	char *ins_data;
	int i;
	struct list_head ordered_sums;
	int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
	bool has_extents = false;
	bool need_find_last_extent = (*last_extent == 0);
	bool done = false;

	INIT_LIST_HEAD(&ordered_sums);

	ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
			   nr * sizeof(u32), GFP_NOFS);
	if (!ins_data)
		return -ENOMEM;

	first_key.objectid = (u64)-1;

	ins_sizes = (u32 *)ins_data;
	ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));

	for (i = 0; i < nr; i++) {
		ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
		btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
	}
	ret = btrfs_insert_empty_items(trans, log, dst_path,
				       ins_keys, ins_sizes, nr);
	if (ret) {
		kfree(ins_data);
		return ret;
	}

	for (i = 0; i < nr; i++, dst_path->slots[0]++) {
		dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
						   dst_path->slots[0]);

		src_offset = btrfs_item_ptr_offset(src, start_slot + i);

		if (i == (nr - 1))
			last_key = ins_keys[i];

		if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
			inode_item = btrfs_item_ptr(dst_path->nodes[0],
						    dst_path->slots[0],
						    struct btrfs_inode_item);
			fill_inode_item(trans, dst_path->nodes[0], inode_item,
					inode, inode_only == LOG_INODE_EXISTS);
		} else {
			copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
					   src_offset, ins_sizes[i]);
		}

		/*
		 * We set need_find_last_extent here in case we know we were
		 * processing other items and then walk into the first extent in
		 * the inode.  If we don't hit an extent then nothing changes,
		 * we'll do the last search the next time around.
		 */
		if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
			has_extents = true;
			if (need_find_last_extent &&
			    first_key.objectid == (u64)-1)
				first_key = ins_keys[i];
		} else {
			need_find_last_extent = false;
		}

		/* take a reference on file data extents so that truncates
		 * or deletes of this inode don't have to relog the inode
		 * again
		 */
		if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY &&
		    !skip_csum) {
			int found_type;
			extent = btrfs_item_ptr(src, start_slot + i,
						struct btrfs_file_extent_item);

			if (btrfs_file_extent_generation(src, extent) < trans->transid)
				continue;

			found_type = btrfs_file_extent_type(src, extent);
			if (found_type == BTRFS_FILE_EXTENT_REG) {
				u64 ds, dl, cs, cl;
				ds = btrfs_file_extent_disk_bytenr(src,
								   extent);
				/* ds == 0 is a hole */
				if (ds == 0)
					continue;

				dl = btrfs_file_extent_disk_num_bytes(src,
								      extent);
				cs = btrfs_file_extent_offset(src, extent);
				cl = btrfs_file_extent_num_bytes(src,
								 extent);
				if (btrfs_file_extent_compression(src,
								  extent)) {
					cs = 0;
					cl = dl;
				}

				ret = btrfs_lookup_csums_range(
						log->fs_info->csum_root,
						ds + cs, ds + cs + cl - 1,
						&ordered_sums, 0);
				if (ret) {
					btrfs_release_path(dst_path);
					kfree(ins_data);
					return ret;
				}
			}
		}
	}

	btrfs_mark_buffer_dirty(dst_path->nodes[0]);
	btrfs_release_path(dst_path);
	kfree(ins_data);

	/*
	 * we have to do this after the loop above to avoid changing the
	 * log tree while trying to change the log tree.
	 */
	ret = 0;
	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						   struct btrfs_ordered_sum,
						   list);
		if (!ret)
			ret = btrfs_csum_file_blocks(trans, log, sums);
		list_del(&sums->list);
		kfree(sums);
	}

	if (!has_extents)
		return ret;

	/*
	 * Because we use btrfs_search_forward we could skip leaves that were
	 * not modified and then assume *last_extent is valid when it really
	 * isn't.  So back up to the previous leaf and read the end of the last
	 * extent before we go and fill in holes.
	 */
	if (need_find_last_extent) {
		u64 len;

		ret = btrfs_prev_leaf(BTRFS_I(inode)->root, src_path);
		if (ret < 0)
			return ret;
		if (ret)
			goto fill_holes;
		if (src_path->slots[0])
			src_path->slots[0]--;
		src = src_path->nodes[0];
		btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
		if (key.objectid != btrfs_ino(inode) ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			goto fill_holes;
		extent = btrfs_item_ptr(src, src_path->slots[0],
					struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(src, extent) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			len = btrfs_file_extent_inline_len(src,
							   src_path->slots[0],
							   extent);
			*last_extent = ALIGN(key.offset + len,
					     log->sectorsize);
		} else {
			len = btrfs_file_extent_num_bytes(src, extent);
			*last_extent = key.offset + len;
		}
	}
fill_holes:
	/* So we did prev_leaf, now we need to move to the next leaf, but a few
	 * things could have happened
	 *
	 * 1) A merge could have happened, so we could currently be on a leaf
	 * that holds what we were copying in the first place.
	 * 2) A split could have happened, and now not all of the items we want
	 * are on the same leaf.
	 *
	 * So we need to adjust how we search for holes, we need to drop the
	 * path and re-search for the first extent key we found, and then walk
	 * forward until we hit the last one we copied.
	 */
	if (need_find_last_extent) {
		/* btrfs_prev_leaf could return 1 without releasing the path */
		btrfs_release_path(src_path);
		ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &first_key,
					src_path, 0, 0);
		if (ret < 0)
			return ret;
		ASSERT(ret == 0);
		src = src_path->nodes[0];
		i = src_path->slots[0];
	} else {
		i = start_slot;
	}

	/*
	 * Ok so here we need to go through and fill in any holes we may have
	 * to make sure that holes are punched for those areas in case they had
	 * extents previously.
	 */
	while (!done) {
		u64 offset, len;
		u64 extent_end;

		if (i >= btrfs_header_nritems(src_path->nodes[0])) {
			ret = btrfs_next_leaf(BTRFS_I(inode)->root, src_path);
			if (ret < 0)
				return ret;
			ASSERT(ret == 0);
			src = src_path->nodes[0];
			i = 0;
		}

		btrfs_item_key_to_cpu(src, &key, i);
		if (!btrfs_comp_cpu_keys(&key, &last_key))
			done = true;
		if (key.objectid != btrfs_ino(inode) ||
		    key.type != BTRFS_EXTENT_DATA_KEY) {
			i++;
			continue;
		}
		extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(src, extent) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			len = btrfs_file_extent_inline_len(src, i, extent);
			extent_end = ALIGN(key.offset + len, log->sectorsize);
		} else {
			len = btrfs_file_extent_num_bytes(src, extent);
			extent_end = key.offset + len;
		}
		i++;

		if (*last_extent == key.offset) {
			*last_extent = extent_end;
			continue;
		}
		offset = *last_extent;
		len = key.offset - *last_extent;
		ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
					       offset, 0, 0, len, 0, len, 0,
					       0, 0);
		if (ret)
			break;
		*last_extent = offset + len;
	}
	/*
	 * Need to let the callers know we dropped the path so they should
	 * re-search.
	 */
	if (!ret && need_find_last_extent)
		ret = 1;
	return ret;
}

static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct extent_map *em1, *em2;

	em1 = list_entry(a, struct extent_map, list);
	em2 = list_entry(b, struct extent_map, list);

	if (em1->start < em2->start)
		return -1;
	else if (em1->start > em2->start)
		return 1;
	return 0;
}

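/*
 * list_sort() comparison callback: btrfs_log_changed_extents uses it to
 * log the modified extent maps in order of increasing file offset.
 */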
static int log_one_extent(struct btrfs_trans_handle *trans,
			  struct inode *inode, struct btrfs_root *root,
			  struct extent_map *em, struct btrfs_path *path,
			  struct list_head *logged_list)
{
	struct btrfs_root *log = root->log_root;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	struct btrfs_ordered_extent *ordered;
	struct list_head ordered_sums;
	struct btrfs_map_token token;
	struct btrfs_key key;
	u64 mod_start = em->mod_start;
	u64 mod_len = em->mod_len;
	u64 csum_offset;
	u64 csum_len;
	u64 extent_offset = em->start - em->orig_start;
	u64 block_len;
	int ret;
	bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
	int extent_inserted = 0;

	INIT_LIST_HEAD(&ordered_sums);
	btrfs_init_map_token(&token);

	ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
				   em->start + em->len, NULL, 0, 1,
				   sizeof(*fi), &extent_inserted);
	if (ret)
		return ret;

	if (!extent_inserted) {
		key.objectid = btrfs_ino(inode);
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = em->start;

		ret = btrfs_insert_empty_item(trans, log, path, &key,
					      sizeof(*fi));
		if (ret)
			return ret;
	}
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
					       &token);
	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
		skip_csum = true;
		btrfs_set_token_file_extent_type(leaf, fi,
						 BTRFS_FILE_EXTENT_PREALLOC,
						 &token);
	} else {
		btrfs_set_token_file_extent_type(leaf, fi,
						 BTRFS_FILE_EXTENT_REG,
						 &token);
		if (em->block_start == EXTENT_MAP_HOLE)
			skip_csum = true;
	}

	block_len = max(em->block_len, em->orig_block_len);
	if (em->compress_type != BTRFS_COMPRESS_NONE) {
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
							em->block_start,
							&token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
							   &token);
	} else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
							em->block_start -
							extent_offset, &token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
							   &token);
	} else {
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
							   &token);
	}

	btrfs_set_token_file_extent_offset(leaf, fi,
					   em->start - em->orig_start,
					   &token);
	btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
	btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
	btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
						&token);
	btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
	btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	if (skip_csum)
		return 0;

	/*
	 * First check and see if our csums are on our outstanding ordered
	 * extents.
	 */
	list_for_each_entry(ordered, logged_list, log_list) {
		struct btrfs_ordered_sum *sum;

		if (!mod_len)
			break;

		if (ordered->file_offset + ordered->len <= mod_start ||
		    mod_start + mod_len <= ordered->file_offset)
			continue;

		/*
		 * We are going to copy all the csums on this ordered extent, so
		 * go ahead and adjust mod_start and mod_len in case this
		 * ordered extent has already been logged.
		 */
		if (ordered->file_offset > mod_start) {
			if (ordered->file_offset + ordered->len >=
			    mod_start + mod_len)
				mod_len = ordered->file_offset - mod_start;
			/*
			 * If we have this case
			 *
			 * |--------- logged extent ---------|
			 *       |----- ordered extent ----|
			 *
			 * Just don't mess with mod_start and mod_len, we'll
			 * just end up logging more csums than we need and it
			 * will be ok.
			 */
		} else {
			if (ordered->file_offset + ordered->len <
			    mod_start + mod_len) {
				mod_len = (mod_start + mod_len) -
					(ordered->file_offset + ordered->len);
				mod_start = ordered->file_offset +
					ordered->len;
			} else {
				mod_len = 0;
			}
		}

		/*
		 * To keep us from looping for the above case of an ordered
		 * extent that falls inside of the logged extent.
		 */
		if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
				     &ordered->flags))
			continue;

		if (ordered->csum_bytes_left) {
			btrfs_start_ordered_extent(inode, ordered, 0);
			wait_event(ordered->wait,
				   ordered->csum_bytes_left == 0);
		}

		list_for_each_entry(sum, &ordered->list, list) {
			ret = btrfs_csum_file_blocks(trans, log, sum);
			if (ret)
				break;
		}
	}

	if (!mod_len || ret)
		return ret;

	if (em->compress_type) {
		csum_offset = 0;
		csum_len = block_len;
	} else {
		csum_offset = mod_start - em->start;
		csum_len = mod_len;
	}

	/* block start is already adjusted for the file extent offset. */
	ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
				       em->block_start + csum_offset,
				       em->block_start + csum_offset +
				       csum_len - 1, &ordered_sums, 0);
	if (ret)
		return ret;

	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						   struct btrfs_ordered_sum,
						   list);
		if (!ret)
			ret = btrfs_csum_file_blocks(trans, log, sums);
		list_del(&sums->list);
		kfree(sums);
	}

	return ret;
}

static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct inode *inode,
				     struct btrfs_path *path,
				     struct list_head *logged_list)
{
	struct extent_map *em, *n;
	struct list_head extents;
	struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
	u64 test_gen;
	int ret = 0;
	int num = 0;

	INIT_LIST_HEAD(&extents);

	write_lock(&tree->lock);
	test_gen = root->fs_info->last_trans_committed;

	list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
		list_del_init(&em->list);

		/*
		 * Just an arbitrary number, this can be really CPU intensive
		 * once we start getting a lot of extents, and really once we
		 * have a bunch of extents we just want to commit since it will
		 * be faster.
		 */
		if (++num > 32768) {
			list_del_init(&tree->modified_extents);
			ret = -EFBIG;
			goto process;
		}

		if (em->generation <= test_gen)
			continue;
		/* Need a ref to keep it from getting evicted from cache */
		atomic_inc(&em->refs);
		set_bit(EXTENT_FLAG_LOGGING, &em->flags);
		list_add_tail(&em->list, &extents);
	}

	list_sort(NULL, &extents, extent_cmp);

process:
	while (!list_empty(&extents)) {
		em = list_entry(extents.next, struct extent_map, list);

		list_del_init(&em->list);

		/*
		 * If we had an error we just need to delete everybody from our
		 * private list.
		 */
		if (ret) {
			clear_em_logging(tree, em);
			free_extent_map(em);
			continue;
		}

		write_unlock(&tree->lock);

		ret = log_one_extent(trans, inode, root, em, path, logged_list);
		write_lock(&tree->lock);
		clear_em_logging(tree, em);
		free_extent_map(em);
	}
	WARN_ON(!list_empty(&extents));
	write_unlock(&tree->lock);

	btrfs_release_path(path);
	return ret;
}

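/*
 * While an extent map sits on the private list above it holds an extra
 * reference and has EXTENT_FLAG_LOGGING set, so it is neither freed nor
 * merged away until clear_em_logging()/free_extent_map() release it.
 */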
/* log a single inode in the tree log.
 * At least one parent directory for this inode must exist in the tree
 * or be logged already.
 *
 * Any items from this inode changed by the current transaction are copied
 * to the log tree.  An extra reference is taken on any extents in this
 * file, allowing us to avoid a whole pile of corner cases around logging
 * blocks that have been removed from the tree.
 *
 * See LOG_INODE_ALL and related defines for a description of what inode_only
 * does.
 *
 * This handles both files and directories.
 */
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct inode *inode,
			   int inode_only)
{
	struct btrfs_path *path;
	struct btrfs_path *dst_path;
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src = NULL;
	LIST_HEAD(logged_list);
	u64 last_extent = 0;
	int err = 0;
	int ret;
	int nritems;
	int ins_start_slot = 0;
	int ins_nr;
	bool fast_search = false;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	dst_path = btrfs_alloc_path();
	if (!dst_path) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	min_key.objectid = ino;
	min_key.type = BTRFS_INODE_ITEM_KEY;
	min_key.offset = 0;

	max_key.objectid = ino;

	/* today the code can only do partial logging of directories */
	if (S_ISDIR(inode->i_mode) ||
	    (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
		       &BTRFS_I(inode)->runtime_flags) &&
	     inode_only == LOG_INODE_EXISTS))
		max_key.type = BTRFS_XATTR_ITEM_KEY;
	else
		max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	/* Only run delayed items if we are a dir or a new file */
	if (S_ISDIR(inode->i_mode) ||
	    BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) {
		ret = btrfs_commit_inode_delayed_items(trans, inode);
		if (ret) {
			btrfs_free_path(path);
			btrfs_free_path(dst_path);
			return ret;
		}
	}

	mutex_lock(&BTRFS_I(inode)->log_mutex);

	btrfs_get_logged_extents(inode, &logged_list);

	/*
	 * a brute force approach to making sure we get the most uptodate
	 * copies of everything.
	 */
	if (S_ISDIR(inode->i_mode)) {
		int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;

		if (inode_only == LOG_INODE_EXISTS)
			max_key_type = BTRFS_XATTR_ITEM_KEY;
		ret = drop_objectid_items(trans, log, path, ino, max_key_type);
	} else {
		if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				       &BTRFS_I(inode)->runtime_flags)) {
			clear_bit(BTRFS_INODE_COPY_EVERYTHING,
				  &BTRFS_I(inode)->runtime_flags);
			ret = btrfs_truncate_inode_items(trans, log,
							 inode, 0, 0);
		} else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
					      &BTRFS_I(inode)->runtime_flags) ||
			   inode_only == LOG_INODE_EXISTS) {
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			max_key.type = BTRFS_XATTR_ITEM_KEY;
			ret = drop_objectid_items(trans, log, path, ino,
						  max_key.type);
		} else {
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			ret = log_inode_item(trans, log, dst_path, inode);
			if (ret) {
				err = ret;
				goto out_unlock;
			}
			goto log_extents;
		}

	}
	if (ret) {
		err = ret;
		goto out_unlock;
	}
	path->keep_locks = 1;

	while (1) {
		ins_nr = 0;
		ret = btrfs_search_forward(root, &min_key,
					   path, trans->transid);
		if (ret != 0)
			break;
again:
		/* note, ins_nr might be > 0 here, cleanup outside the loop */
		if (min_key.objectid != ino)
			break;
		if (min_key.type > max_key.type)
			break;

		src = path->nodes[0];
		if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
			ins_nr++;
			goto next_slot;
		} else if (!ins_nr) {
			ins_start_slot = path->slots[0];
			ins_nr = 1;
			goto next_slot;
		}

		ret = copy_items(trans, inode, dst_path, path, &last_extent,
				 ins_start_slot, ins_nr, inode_only);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		}
		if (ret) {
			ins_nr = 0;
			btrfs_release_path(path);
			continue;
		}
		ins_nr = 1;
		ins_start_slot = path->slots[0];
next_slot:

		nritems = btrfs_header_nritems(path->nodes[0]);
		path->slots[0]++;
		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(path->nodes[0], &min_key,
					      path->slots[0]);
			goto again;
		}
		if (ins_nr) {
			ret = copy_items(trans, inode, dst_path, path,
					 &last_extent, ins_start_slot,
					 ins_nr, inode_only);
			if (ret < 0) {
				err = ret;
				goto out_unlock;
			}
			ret = 0;
			ins_nr = 0;
		}
		btrfs_release_path(path);

		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < max_key.type) {
			min_key.type++;
			min_key.offset = 0;
		} else {
			break;
		}
	}
	if (ins_nr) {
		ret = copy_items(trans, inode, dst_path, path, &last_extent,
				 ins_start_slot, ins_nr, inode_only);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		}
		ret = 0;
		ins_nr = 0;
	}

log_extents:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);
	if (fast_search) {
		ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
						&logged_list);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	} else if (inode_only == LOG_INODE_ALL) {
		struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
		struct extent_map *em, *n;

		write_lock(&tree->lock);
		list_for_each_entry_safe(em, n, &tree->modified_extents, list)
			list_del_init(&em->list);
		write_unlock(&tree->lock);
	}

	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
		ret = log_directory_changes(trans, root, inode, path, dst_path);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	}
	BTRFS_I(inode)->logged_trans = trans->transid;
	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
out_unlock:
	if (unlikely(err))
		btrfs_put_logged_extents(&logged_list);
	else
		btrfs_submit_logged_extents(&logged_list, log);
	mutex_unlock(&BTRFS_I(inode)->log_mutex);

	btrfs_free_path(path);
	btrfs_free_path(dst_path);
	return err;
}

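/*
 * The logged_trans and last_log_commit values recorded above feed
 * btrfs_inode_in_log(), which lets a later fsync of an unchanged inode
 * skip the tree walk entirely.
 */
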
/*
 * follow the dentry parent pointers up the chain and see if any
 * of the directories in it require a full commit before they can
 * be logged.  Returns zero if nothing special needs to be done or 1 if
 * a full commit is required.
 */
static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
					       struct inode *inode,
					       struct dentry *parent,
					       struct super_block *sb,
					       u64 last_committed)
{
	int ret = 0;
	struct btrfs_root *root;
	struct dentry *old_parent = NULL;
	struct inode *orig_inode = inode;

	/*
	 * for regular files, if its inode is already on disk, we don't
	 * have to worry about the parents at all.  This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->i_mode) &&
	    BTRFS_I(inode)->generation <= last_committed &&
	    BTRFS_I(inode)->last_unlink_trans <= last_committed)
		goto out;

	if (!S_ISDIR(inode->i_mode)) {
		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			goto out;
		inode = parent->d_inode;
	}

	while (1) {
		/*
		 * If we are logging a directory then we start with our inode,
		 * not our parents inode, so we need to skip setting the
		 * logged_trans so that further down in the log code we don't
		 * think this inode has already been logged.
		 */
		if (inode != orig_inode)
			BTRFS_I(inode)->logged_trans = trans->transid;
		smp_mb();

		if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
			root = BTRFS_I(inode)->root;

			/*
			 * make sure any commits to the log are forced
			 * to be full commits
			 */
			btrfs_set_log_full_commit(root->fs_info, trans);
			ret = 1;
			break;
		}

		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			break;

		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
		inode = parent->d_inode;

	}
	dput(old_parent);
out:
	return ret;
}

/*
 * helper function around btrfs_log_inode to make sure newly created
 * parent directories also end up in the log.  A minimal inode and backref
 * only logging is done of any parent directories that are older than
 * the last committed transaction
 */
static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, struct inode *inode,
				  struct dentry *parent, int exists_only,
				  struct btrfs_log_ctx *ctx)
{
	int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
	struct super_block *sb;
	struct dentry *old_parent = NULL;
	int ret = 0;
	u64 last_committed = root->fs_info->last_trans_committed;

	sb = inode->i_sb;

	if (btrfs_test_opt(root, NOTREELOG)) {
		ret = 1;
		goto end_no_trans;
	}

	/*
	 * If the previous transaction commit didn't complete, we have to do
	 * a full commit by ourselves.
	 */
	if (root->fs_info->last_trans_log_full_commit >
	    root->fs_info->last_trans_committed) {
		ret = 1;
		goto end_no_trans;
	}

	if (root != BTRFS_I(inode)->root ||
	    btrfs_root_refs(&root->root_item) == 0) {
		ret = 1;
		goto end_no_trans;
	}

	ret = check_parent_dirs_for_sync(trans, inode, parent,
					 sb, last_committed);
	if (ret)
		goto end_no_trans;

	if (btrfs_inode_in_log(inode, trans->transid)) {
		ret = BTRFS_NO_LOG_SYNC;
		goto end_no_trans;
	}

	ret = start_log_trans(trans, root, ctx);
	if (ret)
		goto end_no_trans;

	ret = btrfs_log_inode(trans, root, inode, inode_only);
	if (ret)
		goto end_trans;

	/*
	 * for regular files, if its inode is already on disk, we don't
	 * have to worry about the parents at all.  This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->i_mode) &&
	    BTRFS_I(inode)->generation <= last_committed &&
	    BTRFS_I(inode)->last_unlink_trans <= last_committed) {
		ret = 0;
		goto end_trans;
	}

	inode_only = LOG_INODE_EXISTS;
	while (1) {
		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			break;

		inode = parent->d_inode;
		if (root != BTRFS_I(inode)->root)
			break;

		if (BTRFS_I(inode)->generation >
		    root->fs_info->last_trans_committed) {
			ret = btrfs_log_inode(trans, root, inode, inode_only);
			if (ret)
				goto end_trans;
		}
		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
	}
	ret = 0;
end_trans:
	dput(old_parent);
	if (ret < 0) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		ret = 1;
	}

	if (ret)
		btrfs_remove_log_ctx(root, ctx);
	btrfs_end_log_trans(root);
end_no_trans:
	return ret;
}

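/*
 * Return conventions above: 0 means the inode is in the new log and the
 * caller should btrfs_sync_log(), 1 means a full transaction commit is
 * needed instead, and BTRFS_NO_LOG_SYNC means a committed log already
 * covers this inode so no sync is required.
 */
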
/*
 * it is not safe to log dentry if the chunk root has added new
 * chunks.  This returns 0 if the dentry was logged, and 1 otherwise.
 * If this returns 1, you must commit the transaction to safely get your
 * data on disk.
 */
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct dentry *dentry,
			  struct btrfs_log_ctx *ctx)
{
	struct dentry *parent = dget_parent(dentry);
	int ret;

	ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent,
				     0, ctx);
	dput(parent);

	return ret;
}

/*
 * should be called during mount to recover and replay any log trees
 * from the FS
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key tmp_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = 0,
	};

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	fs_info->log_root_recovering = 1;

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error;
	}

	wc.trans = trans;
	wc.pin = 1;

	ret = walk_log_tree(trans, log_root_tree, &wc);
	if (ret) {
		btrfs_error(fs_info, ret, "Failed to pin buffers while "
			    "recovering log root tree.");
		goto error;
	}

again:
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);

		if (ret < 0) {
			btrfs_error(fs_info, ret,
				    "Couldn't find tree log root.");
			goto error;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;

		log = btrfs_read_fs_root(log_root_tree, &found_key);
		if (IS_ERR(log)) {
			ret = PTR_ERR(log);
			btrfs_error(fs_info, ret,
				    "Couldn't read tree log root.");
			goto error;
		}

		tmp_key.objectid = found_key.offset;
		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_key.offset = (u64)-1;

		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
		if (IS_ERR(wc.replay_dest)) {
			ret = PTR_ERR(wc.replay_dest);
			free_extent_buffer(log->node);
			free_extent_buffer(log->commit_root);
			kfree(log);
			btrfs_error(fs_info, ret, "Couldn't read target root "
				    "for tree log recovery.");
			goto error;
		}

		wc.replay_dest->log_root = log;
		btrfs_record_root_in_trans(trans, wc.replay_dest);
		ret = walk_log_tree(trans, log, &wc);

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
		}

		key.offset = found_key.offset - 1;
		wc.replay_dest->log_root = NULL;
		free_extent_buffer(log->node);
		free_extent_buffer(log->commit_root);
		kfree(log);

		if (ret)
			goto error;

		if (found_key.offset == 0)
			break;
	}
	btrfs_release_path(path);

	/* step one is to pin it all, step two is to replay just inodes */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}
	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	/* step 4: commit the transaction, which also unpins the blocks */
	ret = btrfs_commit_transaction(trans, fs_info->tree_root);
	if (ret)
		return ret;

	free_extent_buffer(log_root_tree->node);
	log_root_tree->log_root = NULL;
	fs_info->log_root_recovering = 0;
	kfree(log_root_tree);

	return 0;
error:
	if (wc.trans)
		btrfs_end_transaction(wc.trans, fs_info->tree_root);
	btrfs_free_path(path);
	return ret;
}

/*
 * there are some corner cases where we want to force a full
 * commit instead of allowing a directory to be logged.
 *
 * They revolve around files that were unlinked from the directory, and
 * this function updates the parent directory so that a full commit is
 * properly done if it is fsync'd later after the unlinks are done.
 */
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
			     struct inode *dir, struct inode *inode,
			     int for_rename)
{
	/*
	 * when we're logging a file, if it hasn't been renamed
	 * or unlinked, and its inode is fully committed on disk,
	 * we don't have to worry about walking up the directory chain
	 * to log its parents.
	 *
	 * So, we use the last_unlink_trans field to put this transid
	 * into the file.  When the file is logged we check it and
	 * don't log the parents if the file is fully on disk.
	 */
	if (S_ISREG(inode->i_mode))
		BTRFS_I(inode)->last_unlink_trans = trans->transid;

	/*
	 * if this directory was already logged any new
	 * names for this file/dir will get recorded
	 */
	smp_mb();
	if (BTRFS_I(dir)->logged_trans == trans->transid)
		return;

	/*
	 * if the inode we're about to unlink was logged,
	 * the log will be properly updated for any new names
	 */
	if (BTRFS_I(inode)->logged_trans == trans->transid)
		return;

	/*
	 * when renaming files across directories, if the directory
	 * we're unlinking from gets fsync'd later on, there's
	 * no way to find the destination directory later and fsync it
	 * properly.  So, we have to be conservative and force commits
	 * so the new name gets discovered.
	 */
	if (for_rename)
		goto record;

	/* we can safely do the unlink without any special recording */
	return;

record:
	BTRFS_I(dir)->last_unlink_trans = trans->transid;
}

/*
 * Call this after adding a new name for a file and it will properly
 * update the log to reflect the new name.
 *
 * It will return zero if all goes well, and it will return 1 if a
 * full transaction commit is required.
 */
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
		       struct inode *inode, struct inode *old_dir,
		       struct dentry *parent)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/*
	 * this will force the logging code to walk the dentry chain
	 * up to the root
	 */
	if (S_ISREG(inode->i_mode))
		BTRFS_I(inode)->last_unlink_trans = trans->transid;

	/*
	 * if this inode hasn't been logged and the directory we're renaming
	 * it from hasn't been logged, we don't need to log it
	 */
	if (BTRFS_I(inode)->logged_trans <=
	    root->fs_info->last_trans_committed &&
	    (!old_dir || BTRFS_I(old_dir)->logged_trans <=
		    root->fs_info->last_trans_committed))
		return 0;

	return btrfs_log_inode_parent(trans, root, inode, parent, 1, NULL);
}