Btrfs: remove unnecessary memory barrier in btrfs_sync_log()
fs/btrfs/tree-log.c
1 /*
2 * Copyright (C) 2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/blkdev.h>
22 #include <linux/list_sort.h>
23 #include "ctree.h"
24 #include "transaction.h"
25 #include "disk-io.h"
26 #include "locking.h"
27 #include "print-tree.h"
28 #include "backref.h"
29 #include "tree-log.h"
30 #include "hash.h"
31
32 /* magic values for the inode_only field in btrfs_log_inode:
33 *
34 * LOG_INODE_ALL means to log everything
35 * LOG_INODE_EXISTS means to log just enough to recreate the inode
36 * during log replay
37 */
38 #define LOG_INODE_ALL 0
39 #define LOG_INODE_EXISTS 1
40
41 /*
42 * directory trouble cases
43 *
44 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
45 * log, we must force a full commit before doing an fsync of the directory
46 * where the unlink was done.
47 * ---> record transid of last unlink/rename per directory
48 *
49 * mkdir foo/some_dir
50 * normal commit
51 * rename foo/some_dir foo2/some_dir
52 * mkdir foo/some_dir
53 * fsync foo/some_dir/some_file
54 *
55 * The fsync above will log the unlink of the original some_dir without
56 * recording it in its new location (foo2). After a crash, some_dir will
57 * be gone unless the fsync of some_file forces a full commit
58 *
59 * 2) we must log any new names for any file or dir that is in the fsync
60 * log. ---> check inode while renaming/linking.
61 *
62 * 2a) we must log any new names for any file or dir during rename
63 * when the directory they are being removed from was logged.
64 * ---> check inode and old parent dir during rename
65 *
66 * 2a is actually the more important variant. Without the extra logging
67 * a crash might unlink the old name without recreating the new one
68 *
69 * 3) after a crash, we must go through any directories with a link count
70 * of zero and redo the rm -rf
71 *
72 * mkdir f1/foo
73 * normal commit
74 * rm -rf f1/foo
75 * fsync(f1)
76 *
77 * The directory f1 was fully removed from the FS, but fsync was never
78 * called on f1, only its parent dir. After a crash the rm -rf must
79 * be replayed. This must be able to recurse down the entire
80 * directory tree. The inode link count fixup code takes care of the
81 * ugly details.
82 */
83
84 /*
85 * stages for the tree walking. The first
86 * stage (0) is to only pin down the blocks we find.
87 * The second stage (1) is to make sure that all the inodes
88 * we find in the log are created in the subvolume.
89 *
90 * Stage (2) replays directory index items, and the last stage deals
91 * with directories, links, extents and all the other fun semantics
92 */
93 #define LOG_WALK_PIN_ONLY 0
94 #define LOG_WALK_REPLAY_INODES 1
95 #define LOG_WALK_REPLAY_DIR_INDEX 2
96 #define LOG_WALK_REPLAY_ALL 3
97
98 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
99 struct btrfs_root *root, struct inode *inode,
100 int inode_only);
101 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
102 struct btrfs_root *root,
103 struct btrfs_path *path, u64 objectid);
104 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
105 struct btrfs_root *root,
106 struct btrfs_root *log,
107 struct btrfs_path *path,
108 u64 dirid, int del_all);
109
110 /*
111 * tree logging is a special write ahead log used to make sure that
112 * fsyncs and O_SYNCs can happen without doing full tree commits.
113 *
114 * Full tree commits are expensive because they require commonly
115 * modified blocks to be recowed, creating many dirty pages in the
116 * extent tree and a 4x-6x higher write load than ext3.
117 *
118 * Instead of doing a tree commit on every fsync, we use the
119 * key ranges and transaction ids to find items for a given file or directory
120 * that have changed in this transaction. Those items are copied into
121 * a special tree (one per subvolume root), that tree is written to disk
122 * and then the fsync is considered complete.
123 *
124 * After a crash, items are copied out of the log-tree back into the
125 * subvolume tree. Any file data extents found are recorded in the extent
126 * allocation tree, and the log-tree freed.
127 *
128 * The log tree is read three times: once to pin down all the extents it
129 * is using in RAM, once to create all the inodes logged in the tree,
130 * and once to do all the other items.
131 */
132
133 /*
134 * start a sub transaction and setup the log tree
135 * this increments the log tree writer count to make the people
136 * syncing the tree wait for us to finish
137 */
138 static int start_log_trans(struct btrfs_trans_handle *trans,
139 struct btrfs_root *root)
140 {
141 int ret;
142
143 mutex_lock(&root->log_mutex);
144 if (root->log_root) {
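/*
 * A log transaction is already running. Record whether more than
 * one task is writing to it; the sync path uses log_multiple_pids
 * to decide whether a short batching delay is worthwhile.
 */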
145 if (!root->log_start_pid) {
146 root->log_start_pid = current->pid;
147 root->log_multiple_pids = false;
148 } else if (root->log_start_pid != current->pid) {
149 root->log_multiple_pids = true;
150 }
151
152 atomic_inc(&root->log_batch);
153 atomic_inc(&root->log_writers);
154 mutex_unlock(&root->log_mutex);
155 return 0;
156 }
157
158 ret = 0;
159 mutex_lock(&root->fs_info->tree_log_mutex);
160 if (!root->fs_info->log_root_tree)
161 ret = btrfs_init_log_root_tree(trans, root->fs_info);
162 mutex_unlock(&root->fs_info->tree_log_mutex);
163 if (ret)
164 goto out;
165
166 if (!root->log_root) {
167 ret = btrfs_add_log_tree(trans, root);
168 if (ret)
169 goto out;
170 }
171 root->log_multiple_pids = false;
172 root->log_start_pid = current->pid;
173 atomic_inc(&root->log_batch);
174 atomic_inc(&root->log_writers);
175 out:
176 mutex_unlock(&root->log_mutex);
177 return ret;
178 }
179
180 /*
181 * returns 0 if there was a log transaction running and we were able
182 * to join, or returns -ENOENT if there were no transactions
183 * in progress
184 */
185 static int join_running_log_trans(struct btrfs_root *root)
186 {
187 int ret = -ENOENT;
188
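/*
 * Quick lockless peek at log_root; the check is repeated under
 * log_mutex below before we actually join.
 */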
189 smp_mb();
190 if (!root->log_root)
191 return -ENOENT;
192
193 mutex_lock(&root->log_mutex);
194 if (root->log_root) {
195 ret = 0;
196 atomic_inc(&root->log_writers);
197 }
198 mutex_unlock(&root->log_mutex);
199 return ret;
200 }
201
202 /*
203 * This either makes the current running log transaction wait
204 * until you call btrfs_end_log_trans() or it makes any future
205 * log transactions wait until you call btrfs_end_log_trans()
206 */
207 int btrfs_pin_log_trans(struct btrfs_root *root)
208 {
209 int ret = 0;
210
211 mutex_lock(&root->log_mutex);
212 atomic_inc(&root->log_writers);
213 mutex_unlock(&root->log_mutex);
214 return ret;
215 }
216
217 /*
218 * indicate we're done making changes to the log tree
219 * and wake up anyone waiting to do a sync
220 */
221 void btrfs_end_log_trans(struct btrfs_root *root)
222 {
223 if (atomic_dec_and_test(&root->log_writers)) {
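/*
 * Make our decrement of log_writers visible before the
 * waitqueue_active() check; this is the usual barrier pairing for
 * lockless waitqueue_active() users.
 */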
224 smp_mb();
225 if (waitqueue_active(&root->log_writer_wait))
226 wake_up(&root->log_writer_wait);
227 }
228 }
229
230
231 /*
232 * the walk control struct is used to pass state down the chain when
233 * processing the log tree. The stage field tells us which part
234 * of the log tree processing we are currently doing. The others
235 * are state fields used for that specific part
236 */
237 struct walk_control {
238 /* should we free the extent on disk when done? This is used
239 * at transaction commit time while freeing a log tree
240 */
241 int free;
242
243 /* should we write out the extent buffer? This is used
244 * while flushing the log tree to disk during a sync
245 */
246 int write;
247
248 /* should we wait for the extent buffer io to finish? Also used
249 * while flushing the log tree to disk for a sync
250 */
251 int wait;
252
253 /* pin only walk, we record which extents on disk belong to the
254 * log trees
255 */
256 int pin;
257
258 /* what stage of the replay code we're currently in */
259 int stage;
260
261 /* the root we are currently replaying */
262 struct btrfs_root *replay_dest;
263
264 /* the trans handle for the current replay */
265 struct btrfs_trans_handle *trans;
266
267 /* the function that gets used to process blocks we find in the
268 * tree. Note the extent_buffer might not be up to date when it is
269 * passed in, and it must be checked or read if you need the data
270 * inside it
271 */
272 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
273 struct walk_control *wc, u64 gen);
274 };
275
276 /*
277 * process_func used to pin down extents, write them or wait on them
278 */
279 static int process_one_buffer(struct btrfs_root *log,
280 struct extent_buffer *eb,
281 struct walk_control *wc, u64 gen)
282 {
283 int ret = 0;
284
285 /*
286 * If this fs is mixed then we need to be able to process the leaves to
287 * pin down any logged extents, so we have to read the block.
288 */
289 if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
290 ret = btrfs_read_buffer(eb, gen);
291 if (ret)
292 return ret;
293 }
294
295 if (wc->pin)
296 ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
297 eb->start, eb->len);
298
299 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
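/*
 * Leaves may reference data extents; exclude those from free space
 * accounting so they are not handed out again before replay has
 * accounted for them.
 */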
300 if (wc->pin && btrfs_header_level(eb) == 0)
301 ret = btrfs_exclude_logged_extents(log, eb);
302 if (wc->write)
303 btrfs_write_tree_block(eb);
304 if (wc->wait)
305 btrfs_wait_tree_block_writeback(eb);
306 }
307 return ret;
308 }
309
310 /*
311 * Item overwrite used by replay and tree logging. eb, slot and key all refer
312 * to the src data we are copying out.
313 *
314 * root is the tree we are copying into, and path is a scratch
315 * path for use in this function (it should be released on entry and
316 * will be released on exit).
317 *
318 * If the key is already in the destination tree the existing item is
319 * overwritten. If the existing item isn't big enough, it is extended.
320 * If it is too large, it is truncated.
321 *
322 * If the key isn't in the destination yet, a new item is inserted.
323 */
324 static noinline int overwrite_item(struct btrfs_trans_handle *trans,
325 struct btrfs_root *root,
326 struct btrfs_path *path,
327 struct extent_buffer *eb, int slot,
328 struct btrfs_key *key)
329 {
330 int ret;
331 u32 item_size;
332 u64 saved_i_size = 0;
333 int save_old_i_size = 0;
334 unsigned long src_ptr;
335 unsigned long dst_ptr;
336 int overwrite_root = 0;
337 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
338
339 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
340 overwrite_root = 1;
341
342 item_size = btrfs_item_size_nr(eb, slot);
343 src_ptr = btrfs_item_ptr_offset(eb, slot);
344
345 /* look for the key in the destination tree */
346 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
347 if (ret < 0)
348 return ret;
349
350 if (ret == 0) {
351 char *src_copy;
352 char *dst_copy;
353 u32 dst_size = btrfs_item_size_nr(path->nodes[0],
354 path->slots[0]);
355 if (dst_size != item_size)
356 goto insert;
357
358 if (item_size == 0) {
359 btrfs_release_path(path);
360 return 0;
361 }
362 dst_copy = kmalloc(item_size, GFP_NOFS);
363 src_copy = kmalloc(item_size, GFP_NOFS);
364 if (!dst_copy || !src_copy) {
365 btrfs_release_path(path);
366 kfree(dst_copy);
367 kfree(src_copy);
368 return -ENOMEM;
369 }
370
371 read_extent_buffer(eb, src_copy, src_ptr, item_size);
372
373 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
374 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
375 item_size);
376 ret = memcmp(dst_copy, src_copy, item_size);
377
378 kfree(dst_copy);
379 kfree(src_copy);
380 /*
381 * they have the same contents, just return, this saves
382 * us from cowing blocks in the destination tree and doing
383 * extra writes that may not have been done by a previous
384 * sync
385 */
386 if (ret == 0) {
387 btrfs_release_path(path);
388 return 0;
389 }
390
391 /*
392 * We need to load the old nbytes into the inode so when we
393 * replay the extents we've logged we get the right nbytes.
394 */
395 if (inode_item) {
396 struct btrfs_inode_item *item;
397 u64 nbytes;
398 u32 mode;
399
400 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
401 struct btrfs_inode_item);
402 nbytes = btrfs_inode_nbytes(path->nodes[0], item);
403 item = btrfs_item_ptr(eb, slot,
404 struct btrfs_inode_item);
405 btrfs_set_inode_nbytes(eb, item, nbytes);
406
407 /*
408 * If this is a directory we need to reset the i_size to
409 * 0 so that we can set it up properly when replaying
410 * the rest of the items in this log.
411 */
412 mode = btrfs_inode_mode(eb, item);
413 if (S_ISDIR(mode))
414 btrfs_set_inode_size(eb, item, 0);
415 }
416 } else if (inode_item) {
417 struct btrfs_inode_item *item;
418 u32 mode;
419
420 /*
421 * New inode, set nbytes to 0 so that the nbytes comes out
422 * properly when we replay the extents.
423 */
424 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
425 btrfs_set_inode_nbytes(eb, item, 0);
426
427 /*
428 * If this is a directory we need to reset the i_size to 0 so
429 * that we can set it up properly when replaying the rest of
430 * the items in this log.
431 */
432 mode = btrfs_inode_mode(eb, item);
433 if (S_ISDIR(mode))
434 btrfs_set_inode_size(eb, item, 0);
435 }
436 insert:
437 btrfs_release_path(path);
438 /* try to insert the key into the destination tree */
439 ret = btrfs_insert_empty_item(trans, root, path,
440 key, item_size);
441
442 /* make sure any existing item is the correct size */
443 if (ret == -EEXIST) {
444 u32 found_size;
445 found_size = btrfs_item_size_nr(path->nodes[0],
446 path->slots[0]);
447 if (found_size > item_size)
448 btrfs_truncate_item(root, path, item_size, 1);
449 else if (found_size < item_size)
450 btrfs_extend_item(root, path,
451 item_size - found_size);
452 } else if (ret) {
453 return ret;
454 }
455 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
456 path->slots[0]);
457
458 /* don't overwrite an existing inode if the generation number
459 * was logged as zero. This is done when the tree logging code
460 * is just logging an inode to make sure it exists after recovery.
461 *
462 * Also, don't overwrite i_size on directories during replay.
463 * log replay inserts and removes directory items based on the
464 * state of the tree found in the subvolume, and i_size is modified
465 * as it goes
466 */
467 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
468 struct btrfs_inode_item *src_item;
469 struct btrfs_inode_item *dst_item;
470
471 src_item = (struct btrfs_inode_item *)src_ptr;
472 dst_item = (struct btrfs_inode_item *)dst_ptr;
473
474 if (btrfs_inode_generation(eb, src_item) == 0)
475 goto no_copy;
476
477 if (overwrite_root &&
478 S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
479 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
480 save_old_i_size = 1;
481 saved_i_size = btrfs_inode_size(path->nodes[0],
482 dst_item);
483 }
484 }
485
486 copy_extent_buffer(path->nodes[0], eb, dst_ptr,
487 src_ptr, item_size);
488
489 if (save_old_i_size) {
490 struct btrfs_inode_item *dst_item;
491 dst_item = (struct btrfs_inode_item *)dst_ptr;
492 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
493 }
494
495 /* make sure the generation is filled in */
496 if (key->type == BTRFS_INODE_ITEM_KEY) {
497 struct btrfs_inode_item *dst_item;
498 dst_item = (struct btrfs_inode_item *)dst_ptr;
499 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
500 btrfs_set_inode_generation(path->nodes[0], dst_item,
501 trans->transid);
502 }
503 }
504 no_copy:
505 btrfs_mark_buffer_dirty(path->nodes[0]);
506 btrfs_release_path(path);
507 return 0;
508 }
509
510 /*
511 * simple helper to read an inode off the disk from a given root
512 * This can only be called for subvolume roots and not for the log
513 */
514 static noinline struct inode *read_one_inode(struct btrfs_root *root,
515 u64 objectid)
516 {
517 struct btrfs_key key;
518 struct inode *inode;
519
520 key.objectid = objectid;
521 key.type = BTRFS_INODE_ITEM_KEY;
522 key.offset = 0;
523 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
524 if (IS_ERR(inode)) {
525 inode = NULL;
526 } else if (is_bad_inode(inode)) {
527 iput(inode);
528 inode = NULL;
529 }
530 return inode;
531 }
532
533 /* replays a single extent in 'eb' at 'slot' with 'key' into the
534 * subvolume 'root'. path is released on entry and should be released
535 * on exit.
536 *
537 * extents in the log tree have not been allocated out of the extent
538 * tree yet. So, this completes the allocation, taking a reference
539 * as required if the extent already exists or creating a new extent
540 * if it isn't in the extent allocation tree yet.
541 *
542 * The extent is inserted into the file, dropping any existing extents
543 * from the file that overlap the new one.
544 */
545 static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
546 struct btrfs_root *root,
547 struct btrfs_path *path,
548 struct extent_buffer *eb, int slot,
549 struct btrfs_key *key)
550 {
551 int found_type;
552 u64 extent_end;
553 u64 start = key->offset;
554 u64 nbytes = 0;
555 struct btrfs_file_extent_item *item;
556 struct inode *inode = NULL;
557 unsigned long size;
558 int ret = 0;
559
560 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
561 found_type = btrfs_file_extent_type(eb, item);
562
563 if (found_type == BTRFS_FILE_EXTENT_REG ||
564 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
565 nbytes = btrfs_file_extent_num_bytes(eb, item);
566 extent_end = start + nbytes;
567
568 /*
569 * We don't add to the inode's nbytes if we are prealloc or a
570 * hole.
571 */
572 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
573 nbytes = 0;
574 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
575 size = btrfs_file_extent_inline_len(eb, slot, item);
576 nbytes = btrfs_file_extent_ram_bytes(eb, item);
577 extent_end = ALIGN(start + size, root->sectorsize);
578 } else {
579 ret = 0;
580 goto out;
581 }
582
583 inode = read_one_inode(root, key->objectid);
584 if (!inode) {
585 ret = -EIO;
586 goto out;
587 }
588
589 /*
590 * first check to see if we already have this extent in the
591 * file. This must be done before btrfs_drop_extents runs
592 * so we don't try to drop this extent.
593 */
594 ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
595 start, 0);
596
597 if (ret == 0 &&
598 (found_type == BTRFS_FILE_EXTENT_REG ||
599 found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
600 struct btrfs_file_extent_item cmp1;
601 struct btrfs_file_extent_item cmp2;
602 struct btrfs_file_extent_item *existing;
603 struct extent_buffer *leaf;
604
605 leaf = path->nodes[0];
606 existing = btrfs_item_ptr(leaf, path->slots[0],
607 struct btrfs_file_extent_item);
608
609 read_extent_buffer(eb, &cmp1, (unsigned long)item,
610 sizeof(cmp1));
611 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
612 sizeof(cmp2));
613
614 /*
615 * we already have a pointer to this exact extent,
616 * we don't have to do anything
617 */
618 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
619 btrfs_release_path(path);
620 goto out;
621 }
622 }
623 btrfs_release_path(path);
624
625 /* drop any overlapping extents */
626 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
627 if (ret)
628 goto out;
629
630 if (found_type == BTRFS_FILE_EXTENT_REG ||
631 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
632 u64 offset;
633 unsigned long dest_offset;
634 struct btrfs_key ins;
635
636 ret = btrfs_insert_empty_item(trans, root, path, key,
637 sizeof(*item));
638 if (ret)
639 goto out;
640 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
641 path->slots[0]);
642 copy_extent_buffer(path->nodes[0], eb, dest_offset,
643 (unsigned long)item, sizeof(*item));
644
645 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
646 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
647 ins.type = BTRFS_EXTENT_ITEM_KEY;
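/*
 * Data back references are keyed on the file offset minus the
 * offset into the on-disk extent, so compute that here.
 */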
648 offset = key->offset - btrfs_file_extent_offset(eb, item);
649
650 if (ins.objectid > 0) {
651 u64 csum_start;
652 u64 csum_end;
653 LIST_HEAD(ordered_sums);
654 /*
655 * is this extent already allocated in the extent
656 * allocation tree? If so, just add a reference
657 */
658 ret = btrfs_lookup_extent(root, ins.objectid,
659 ins.offset);
660 if (ret == 0) {
661 ret = btrfs_inc_extent_ref(trans, root,
662 ins.objectid, ins.offset,
663 0, root->root_key.objectid,
664 key->objectid, offset, 0);
665 if (ret)
666 goto out;
667 } else {
668 /*
669 * insert the extent pointer in the extent
670 * allocation tree
671 */
672 ret = btrfs_alloc_logged_file_extent(trans,
673 root, root->root_key.objectid,
674 key->objectid, offset, &ins);
675 if (ret)
676 goto out;
677 }
678 btrfs_release_path(path);
679
680 if (btrfs_file_extent_compression(eb, item)) {
681 csum_start = ins.objectid;
682 csum_end = csum_start + ins.offset;
683 } else {
684 csum_start = ins.objectid +
685 btrfs_file_extent_offset(eb, item);
686 csum_end = csum_start +
687 btrfs_file_extent_num_bytes(eb, item);
688 }
689
690 ret = btrfs_lookup_csums_range(root->log_root,
691 csum_start, csum_end - 1,
692 &ordered_sums, 0);
693 if (ret)
694 goto out;
695 while (!list_empty(&ordered_sums)) {
696 struct btrfs_ordered_sum *sums;
697 sums = list_entry(ordered_sums.next,
698 struct btrfs_ordered_sum,
699 list);
700 if (!ret)
701 ret = btrfs_csum_file_blocks(trans,
702 root->fs_info->csum_root,
703 sums);
704 list_del(&sums->list);
705 kfree(sums);
706 }
707 if (ret)
708 goto out;
709 } else {
710 btrfs_release_path(path);
711 }
712 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
713 /* inline extents are easy, we just overwrite them */
714 ret = overwrite_item(trans, root, path, eb, slot, key);
715 if (ret)
716 goto out;
717 }
718
719 inode_add_bytes(inode, nbytes);
720 ret = btrfs_update_inode(trans, root, inode);
721 out:
722 if (inode)
723 iput(inode);
724 return ret;
725 }
726
727 /*
728 * when cleaning up conflicts between the directory names in the
729 * subvolume, directory names in the log and directory names in the
730 * inode back references, we may have to unlink inodes from directories.
731 *
732 * This is a helper function to do the unlink of a specific directory
733 * item
734 */
735 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
736 struct btrfs_root *root,
737 struct btrfs_path *path,
738 struct inode *dir,
739 struct btrfs_dir_item *di)
740 {
741 struct inode *inode;
742 char *name;
743 int name_len;
744 struct extent_buffer *leaf;
745 struct btrfs_key location;
746 int ret;
747
748 leaf = path->nodes[0];
749
750 btrfs_dir_item_key_to_cpu(leaf, di, &location);
751 name_len = btrfs_dir_name_len(leaf, di);
752 name = kmalloc(name_len, GFP_NOFS);
753 if (!name)
754 return -ENOMEM;
755
756 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
757 btrfs_release_path(path);
758
759 inode = read_one_inode(root, location.objectid);
760 if (!inode) {
761 ret = -EIO;
762 goto out;
763 }
764
765 ret = link_to_fixup_dir(trans, root, path, location.objectid);
766 if (ret)
767 goto out;
768
769 ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
770 if (ret)
771 goto out;
772 else
773 ret = btrfs_run_delayed_items(trans, root);
774 out:
775 kfree(name);
776 iput(inode);
777 return ret;
778 }
779
780 /*
781 * helper function to see if a given name and sequence number found
782 * in an inode back reference are already in a directory and correctly
783 * point to this inode
784 */
785 static noinline int inode_in_dir(struct btrfs_root *root,
786 struct btrfs_path *path,
787 u64 dirid, u64 objectid, u64 index,
788 const char *name, int name_len)
789 {
790 struct btrfs_dir_item *di;
791 struct btrfs_key location;
792 int match = 0;
793
794 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
795 index, name, name_len, 0);
796 if (di && !IS_ERR(di)) {
797 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
798 if (location.objectid != objectid)
799 goto out;
800 } else
801 goto out;
802 btrfs_release_path(path);
803
804 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
805 if (di && !IS_ERR(di)) {
806 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
807 if (location.objectid != objectid)
808 goto out;
809 } else
810 goto out;
811 match = 1;
812 out:
813 btrfs_release_path(path);
814 return match;
815 }
816
817 /*
818 * helper function to check a log tree for a named back reference in
819 * an inode. This is used to decide if a back reference that is
820 * found in the subvolume conflicts with what we find in the log.
821 *
822 * inode backreferences may have multiple refs in a single item;
823 * during replay we process one reference at a time, and we don't
824 * want to delete valid links to a file from the subvolume if that
825 * link is also in the log.
826 */
827 static noinline int backref_in_log(struct btrfs_root *log,
828 struct btrfs_key *key,
829 u64 ref_objectid,
830 char *name, int namelen)
831 {
832 struct btrfs_path *path;
833 struct btrfs_inode_ref *ref;
834 unsigned long ptr;
835 unsigned long ptr_end;
836 unsigned long name_ptr;
837 int found_name_len;
838 int item_size;
839 int ret;
840 int match = 0;
841
842 path = btrfs_alloc_path();
843 if (!path)
844 return -ENOMEM;
845
846 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
847 if (ret != 0)
848 goto out;
849
850 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
851
852 if (key->type == BTRFS_INODE_EXTREF_KEY) {
853 if (btrfs_find_name_in_ext_backref(path, ref_objectid,
854 name, namelen, NULL))
855 match = 1;
856
857 goto out;
858 }
859
860 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
861 ptr_end = ptr + item_size;
862 while (ptr < ptr_end) {
863 ref = (struct btrfs_inode_ref *)ptr;
864 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
865 if (found_name_len == namelen) {
866 name_ptr = (unsigned long)(ref + 1);
867 ret = memcmp_extent_buffer(path->nodes[0], name,
868 name_ptr, namelen);
869 if (ret == 0) {
870 match = 1;
871 goto out;
872 }
873 }
874 ptr = (unsigned long)(ref + 1) + found_name_len;
875 }
876 out:
877 btrfs_free_path(path);
878 return match;
879 }
880
881 static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
882 struct btrfs_root *root,
883 struct btrfs_path *path,
884 struct btrfs_root *log_root,
885 struct inode *dir, struct inode *inode,
886 struct extent_buffer *eb,
887 u64 inode_objectid, u64 parent_objectid,
888 u64 ref_index, char *name, int namelen,
889 int *search_done)
890 {
891 int ret;
892 char *victim_name;
893 int victim_name_len;
894 struct extent_buffer *leaf;
895 struct btrfs_dir_item *di;
896 struct btrfs_key search_key;
897 struct btrfs_inode_extref *extref;
898
899 again:
900 /* Search old style refs */
901 search_key.objectid = inode_objectid;
902 search_key.type = BTRFS_INODE_REF_KEY;
903 search_key.offset = parent_objectid;
904 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
905 if (ret == 0) {
906 struct btrfs_inode_ref *victim_ref;
907 unsigned long ptr;
908 unsigned long ptr_end;
909
910 leaf = path->nodes[0];
911
912 /* are we trying to overwrite a back ref for the root directory?
913 * If so, just jump out, we're done
914 */
915 if (search_key.objectid == search_key.offset)
916 return 1;
917
918 /* check all the names in this back reference to see
919 * if they are in the log. If so, we allow them to stay;
920 * otherwise they must be unlinked as a conflict
921 */
922 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
923 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
924 while (ptr < ptr_end) {
925 victim_ref = (struct btrfs_inode_ref *)ptr;
926 victim_name_len = btrfs_inode_ref_name_len(leaf,
927 victim_ref);
928 victim_name = kmalloc(victim_name_len, GFP_NOFS);
929 if (!victim_name)
930 return -ENOMEM;
931
932 read_extent_buffer(leaf, victim_name,
933 (unsigned long)(victim_ref + 1),
934 victim_name_len);
935
936 if (!backref_in_log(log_root, &search_key,
937 parent_objectid,
938 victim_name,
939 victim_name_len)) {
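/*
 * This name is not in the log, so the link must be removed. Bump
 * nlink so the unlink below cannot drive the in-memory count below
 * zero; the fixup code recounts links once replay finishes.
 */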
940 inc_nlink(inode);
941 btrfs_release_path(path);
942
943 ret = btrfs_unlink_inode(trans, root, dir,
944 inode, victim_name,
945 victim_name_len);
946 kfree(victim_name);
947 if (ret)
948 return ret;
949 ret = btrfs_run_delayed_items(trans, root);
950 if (ret)
951 return ret;
952 *search_done = 1;
953 goto again;
954 }
955 kfree(victim_name);
956
957 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
958 }
959
960 /*
961 * NOTE: we have searched the root tree and checked the
962 * corresponding ref; there is no need to check it again.
963 */
964 *search_done = 1;
965 }
966 btrfs_release_path(path);
967
968 /* Same search but for extended refs */
969 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
970 inode_objectid, parent_objectid, 0,
971 0);
972 if (!IS_ERR_OR_NULL(extref)) {
973 u32 item_size;
974 u32 cur_offset = 0;
975 unsigned long base;
976 struct inode *victim_parent;
977
978 leaf = path->nodes[0];
979
980 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
981 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
982
983 while (cur_offset < item_size) {
984 extref = (struct btrfs_inode_extref *)(base + cur_offset);
985
986 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
987
988 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
989 goto next;
990
991 victim_name = kmalloc(victim_name_len, GFP_NOFS);
992 if (!victim_name)
993 return -ENOMEM;
994 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
995 victim_name_len);
996
997 search_key.objectid = inode_objectid;
998 search_key.type = BTRFS_INODE_EXTREF_KEY;
999 search_key.offset = btrfs_extref_hash(parent_objectid,
1000 victim_name,
1001 victim_name_len);
1002 ret = 0;
1003 if (!backref_in_log(log_root, &search_key,
1004 parent_objectid, victim_name,
1005 victim_name_len)) {
1006 ret = -ENOENT;
1007 victim_parent = read_one_inode(root,
1008 parent_objectid);
1009 if (victim_parent) {
1010 inc_nlink(inode);
1011 btrfs_release_path(path);
1012
1013 ret = btrfs_unlink_inode(trans, root,
1014 victim_parent,
1015 inode,
1016 victim_name,
1017 victim_name_len);
1018 if (!ret)
1019 ret = btrfs_run_delayed_items(
1020 trans, root);
1021 }
1022 iput(victim_parent);
1023 kfree(victim_name);
1024 if (ret)
1025 return ret;
1026 *search_done = 1;
1027 goto again;
1028 }
1029 kfree(victim_name);
1030 if (ret)
1031 return ret;
1032 next:
1033 cur_offset += victim_name_len + sizeof(*extref);
1034 }
1035 *search_done = 1;
1036 }
1037 btrfs_release_path(path);
1038
1039 /* look for a conflicting sequence number */
1040 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
1041 ref_index, name, namelen, 0);
1042 if (di && !IS_ERR(di)) {
1043 ret = drop_one_dir_item(trans, root, path, dir, di);
1044 if (ret)
1045 return ret;
1046 }
1047 btrfs_release_path(path);
1048
1049 /* look for a conflicting name */
1050 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
1051 name, namelen, 0);
1052 if (di && !IS_ERR(di)) {
1053 ret = drop_one_dir_item(trans, root, path, dir, di);
1054 if (ret)
1055 return ret;
1056 }
1057 btrfs_release_path(path);
1058
1059 return 0;
1060 }
1061
1062 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1063 u32 *namelen, char **name, u64 *index,
1064 u64 *parent_objectid)
1065 {
1066 struct btrfs_inode_extref *extref;
1067
1068 extref = (struct btrfs_inode_extref *)ref_ptr;
1069
1070 *namelen = btrfs_inode_extref_name_len(eb, extref);
1071 *name = kmalloc(*namelen, GFP_NOFS);
1072 if (*name == NULL)
1073 return -ENOMEM;
1074
1075 read_extent_buffer(eb, *name, (unsigned long)&extref->name,
1076 *namelen);
1077
1078 *index = btrfs_inode_extref_index(eb, extref);
1079 if (parent_objectid)
1080 *parent_objectid = btrfs_inode_extref_parent(eb, extref);
1081
1082 return 0;
1083 }
1084
1085 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1086 u32 *namelen, char **name, u64 *index)
1087 {
1088 struct btrfs_inode_ref *ref;
1089
1090 ref = (struct btrfs_inode_ref *)ref_ptr;
1091
1092 *namelen = btrfs_inode_ref_name_len(eb, ref);
1093 *name = kmalloc(*namelen, GFP_NOFS);
1094 if (*name == NULL)
1095 return -ENOMEM;
1096
1097 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
1098
1099 *index = btrfs_inode_ref_index(eb, ref);
1100
1101 return 0;
1102 }
1103
1104 /*
1105 * replay one inode back reference item found in the log tree.
1106 * eb, slot and key refer to the buffer and key found in the log tree.
1107 * root is the destination we are replaying into, and path is for temp
1108 * use by this function. (it should be released on return).
1109 */
1110 static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
1111 struct btrfs_root *root,
1112 struct btrfs_root *log,
1113 struct btrfs_path *path,
1114 struct extent_buffer *eb, int slot,
1115 struct btrfs_key *key)
1116 {
1117 struct inode *dir = NULL;
1118 struct inode *inode = NULL;
1119 unsigned long ref_ptr;
1120 unsigned long ref_end;
1121 char *name = NULL;
1122 int namelen;
1123 int ret;
1124 int search_done = 0;
1125 int log_ref_ver = 0;
1126 u64 parent_objectid;
1127 u64 inode_objectid;
1128 u64 ref_index = 0;
1129 int ref_struct_size;
1130
1131 ref_ptr = btrfs_item_ptr_offset(eb, slot);
1132 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
1133
1134 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1135 struct btrfs_inode_extref *r;
1136
1137 ref_struct_size = sizeof(struct btrfs_inode_extref);
1138 log_ref_ver = 1;
1139 r = (struct btrfs_inode_extref *)ref_ptr;
1140 parent_objectid = btrfs_inode_extref_parent(eb, r);
1141 } else {
1142 ref_struct_size = sizeof(struct btrfs_inode_ref);
1143 parent_objectid = key->offset;
1144 }
1145 inode_objectid = key->objectid;
1146
1147 /*
1148 * it is possible that we didn't log all the parent directories
1149 * for a given inode. If we don't find the dir, just don't
1150 * copy the back ref in. The link count fixup code will take
1151 * care of the rest
1152 */
1153 dir = read_one_inode(root, parent_objectid);
1154 if (!dir) {
1155 ret = -ENOENT;
1156 goto out;
1157 }
1158
1159 inode = read_one_inode(root, inode_objectid);
1160 if (!inode) {
1161 ret = -EIO;
1162 goto out;
1163 }
1164
1165 while (ref_ptr < ref_end) {
1166 if (log_ref_ver) {
1167 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1168 &ref_index, &parent_objectid);
1169 /*
1170 * parent object can change from one array
1171 * item to another.
1172 */
1173 if (!dir)
1174 dir = read_one_inode(root, parent_objectid);
1175 if (!dir) {
1176 ret = -ENOENT;
1177 goto out;
1178 }
1179 } else {
1180 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1181 &ref_index);
1182 }
1183 if (ret)
1184 goto out;
1185
1186 /* if we already have a perfect match, we're done */
1187 if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
1188 ref_index, name, namelen)) {
1189 /*
1190 * look for a conflicting back reference in the
1191 * metadata. if we find one we have to unlink that name
1192 * of the file before we add our new link. Later on, we
1193 * overwrite any existing back reference, and we don't
1194 * want to create dangling pointers in the directory.
1195 */
1196
1197 if (!search_done) {
1198 ret = __add_inode_ref(trans, root, path, log,
1199 dir, inode, eb,
1200 inode_objectid,
1201 parent_objectid,
1202 ref_index, name, namelen,
1203 &search_done);
1204 if (ret) {
1205 if (ret == 1)
1206 ret = 0;
1207 goto out;
1208 }
1209 }
1210
1211 /* insert our name */
1212 ret = btrfs_add_link(trans, dir, inode, name, namelen,
1213 0, ref_index);
1214 if (ret)
1215 goto out;
1216
1217 btrfs_update_inode(trans, root, inode);
1218 }
1219
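/* step past this ref: a fixed struct followed by the inline name */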
1220 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
1221 kfree(name);
1222 name = NULL;
1223 if (log_ref_ver) {
1224 iput(dir);
1225 dir = NULL;
1226 }
1227 }
1228
1229 /* finally write the back reference in the inode */
1230 ret = overwrite_item(trans, root, path, eb, slot, key);
1231 out:
1232 btrfs_release_path(path);
1233 kfree(name);
1234 iput(dir);
1235 iput(inode);
1236 return ret;
1237 }
1238
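/*
 * make sure an orphan item exists for the given inode number,
 * inserting one only if it is not already present
 */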
1239 static int insert_orphan_item(struct btrfs_trans_handle *trans,
1240 struct btrfs_root *root, u64 offset)
1241 {
1242 int ret;
1243 ret = btrfs_find_item(root, NULL, BTRFS_ORPHAN_OBJECTID,
1244 offset, BTRFS_ORPHAN_ITEM_KEY, NULL);
1245 if (ret > 0)
1246 ret = btrfs_insert_orphan_item(trans, root, offset);
1247 return ret;
1248 }
1249
1250 static int count_inode_extrefs(struct btrfs_root *root,
1251 struct inode *inode, struct btrfs_path *path)
1252 {
1253 int ret = 0;
1254 int name_len;
1255 unsigned int nlink = 0;
1256 u32 item_size;
1257 u32 cur_offset = 0;
1258 u64 inode_objectid = btrfs_ino(inode);
1259 u64 offset = 0;
1260 unsigned long ptr;
1261 struct btrfs_inode_extref *extref;
1262 struct extent_buffer *leaf;
1263
1264 while (1) {
1265 ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
1266 &extref, &offset);
1267 if (ret)
1268 break;
1269
1270 leaf = path->nodes[0];
1271 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1272 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1273 cur_offset = 0;
1274 while (cur_offset < item_size) {
1275 extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
1276 name_len = btrfs_inode_extref_name_len(leaf, extref);
1277
1278 nlink++;
1279
1280 cur_offset += name_len + sizeof(*extref);
1281 }
1282
1283 offset++;
1284 btrfs_release_path(path);
1285 }
1286 btrfs_release_path(path);
1287
1288 if (ret < 0)
1289 return ret;
1290 return nlink;
1291 }
1292
1293 static int count_inode_refs(struct btrfs_root *root,
1294 struct inode *inode, struct btrfs_path *path)
1295 {
1296 int ret;
1297 struct btrfs_key key;
1298 unsigned int nlink = 0;
1299 unsigned long ptr;
1300 unsigned long ptr_end;
1301 int name_len;
1302 u64 ino = btrfs_ino(inode);
1303
1304 key.objectid = ino;
1305 key.type = BTRFS_INODE_REF_KEY;
1306 key.offset = (u64)-1;
1307
1308 while (1) {
1309 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1310 if (ret < 0)
1311 break;
1312 if (ret > 0) {
1313 if (path->slots[0] == 0)
1314 break;
1315 path->slots[0]--;
1316 }
1317 process_slot:
1318 btrfs_item_key_to_cpu(path->nodes[0], &key,
1319 path->slots[0]);
1320 if (key.objectid != ino ||
1321 key.type != BTRFS_INODE_REF_KEY)
1322 break;
1323 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1324 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
1325 path->slots[0]);
1326 while (ptr < ptr_end) {
1327 struct btrfs_inode_ref *ref;
1328
1329 ref = (struct btrfs_inode_ref *)ptr;
1330 name_len = btrfs_inode_ref_name_len(path->nodes[0],
1331 ref);
1332 ptr = (unsigned long)(ref + 1) + name_len;
1333 nlink++;
1334 }
1335
1336 if (key.offset == 0)
1337 break;
1338 if (path->slots[0] > 0) {
1339 path->slots[0]--;
1340 goto process_slot;
1341 }
1342 key.offset--;
1343 btrfs_release_path(path);
1344 }
1345 btrfs_release_path(path);
1346
1347 return nlink;
1348 }
1349
1350 /*
1351 * There are a few corners where the link count of the file can't
1352 * be properly maintained during replay. So, instead of adding
1353 * lots of complexity to the log code, we just scan the backrefs
1354 * for any file that has been through replay.
1355 *
1356 * The scan will update the link count on the inode to reflect the
1357 * number of back refs found. If it goes down to zero, the iput
1358 * will free the inode.
1359 */
1360 static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1361 struct btrfs_root *root,
1362 struct inode *inode)
1363 {
1364 struct btrfs_path *path;
1365 int ret;
1366 u64 nlink = 0;
1367 u64 ino = btrfs_ino(inode);
1368
1369 path = btrfs_alloc_path();
1370 if (!path)
1371 return -ENOMEM;
1372
1373 ret = count_inode_refs(root, inode, path);
1374 if (ret < 0)
1375 goto out;
1376
1377 nlink = ret;
1378
1379 ret = count_inode_extrefs(root, inode, path);
1380 if (ret == -ENOENT)
1381 ret = 0;
1382
1383 if (ret < 0)
1384 goto out;
1385
1386 nlink += ret;
1387
1388 ret = 0;
1389
1390 if (nlink != inode->i_nlink) {
1391 set_nlink(inode, nlink);
1392 btrfs_update_inode(trans, root, inode);
1393 }
1394 BTRFS_I(inode)->index_cnt = (u64)-1;
1395
1396 if (inode->i_nlink == 0) {
1397 if (S_ISDIR(inode->i_mode)) {
1398 ret = replay_dir_deletes(trans, root, NULL, path,
1399 ino, 1);
1400 if (ret)
1401 goto out;
1402 }
1403 ret = insert_orphan_item(trans, root, ino);
1404 }
1405
1406 out:
1407 btrfs_free_path(path);
1408 return ret;
1409 }
1410
1411 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1412 struct btrfs_root *root,
1413 struct btrfs_path *path)
1414 {
1415 int ret;
1416 struct btrfs_key key;
1417 struct inode *inode;
1418
1419 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1420 key.type = BTRFS_ORPHAN_ITEM_KEY;
1421 key.offset = (u64)-1;
1422 while (1) {
1423 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1424 if (ret < 0)
1425 break;
1426
1427 if (ret == 1) {
1428 if (path->slots[0] == 0)
1429 break;
1430 path->slots[0]--;
1431 }
1432
1433 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1434 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1435 key.type != BTRFS_ORPHAN_ITEM_KEY)
1436 break;
1437
1438 ret = btrfs_del_item(trans, root, path);
1439 if (ret)
1440 goto out;
1441
1442 btrfs_release_path(path);
1443 inode = read_one_inode(root, key.offset);
1444 if (!inode)
1445 return -EIO;
1446
1447 ret = fixup_inode_link_count(trans, root, inode);
1448 iput(inode);
1449 if (ret)
1450 goto out;
1451
1452 /*
1453 * fixup on a directory may create new entries, so
1454 * make sure we always look for the highest possible
1455 * offset
1456 */
1457 key.offset = (u64)-1;
1458 }
1459 ret = 0;
1460 out:
1461 btrfs_release_path(path);
1462 return ret;
1463 }
1464
1465
1466 /*
1467 * record a given inode in the fixup dir so we can check its link
1468 * count when replay is done. The link count is incremented here
1469 * so the inode won't go away until we check it
1470 */
1471 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1472 struct btrfs_root *root,
1473 struct btrfs_path *path,
1474 u64 objectid)
1475 {
1476 struct btrfs_key key;
1477 int ret = 0;
1478 struct inode *inode;
1479
1480 inode = read_one_inode(root, objectid);
1481 if (!inode)
1482 return -EIO;
1483
1484 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1485 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
1486 key.offset = objectid;
1487
1488 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1489
1490 btrfs_release_path(path);
1491 if (ret == 0) {
1492 if (!inode->i_nlink)
1493 set_nlink(inode, 1);
1494 else
1495 inc_nlink(inode);
1496 ret = btrfs_update_inode(trans, root, inode);
1497 } else if (ret == -EEXIST) {
1498 ret = 0;
1499 } else {
1500 BUG(); /* Logic Error */
1501 }
1502 iput(inode);
1503
1504 return ret;
1505 }
1506
1507 /*
1508 * when replaying the log for a directory, we only insert names
1509 * for inodes that actually exist. This means an fsync on a directory
1510 * does not implicitly fsync all the new files in it
1511 */
1512 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1513 struct btrfs_root *root,
1514 struct btrfs_path *path,
1515 u64 dirid, u64 index,
1516 char *name, int name_len, u8 type,
1517 struct btrfs_key *location)
1518 {
1519 struct inode *inode;
1520 struct inode *dir;
1521 int ret;
1522
1523 inode = read_one_inode(root, location->objectid);
1524 if (!inode)
1525 return -ENOENT;
1526
1527 dir = read_one_inode(root, dirid);
1528 if (!dir) {
1529 iput(inode);
1530 return -EIO;
1531 }
1532
1533 ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
1534
1535 /* FIXME, put inode into FIXUP list */
1536
1537 iput(inode);
1538 iput(dir);
1539 return ret;
1540 }
1541
1542 /*
1543 * take a single entry in a log directory item and replay it into
1544 * the subvolume.
1545 *
1546 * if a conflicting item exists in the subdirectory already,
1547 * the inode it points to is unlinked and put into the link count
1548 * fix up tree.
1549 *
1550 * If a name from the log points to a file or directory that does
1551 * not exist in the FS, it is skipped. fsyncs on directories
1552 * do not force down inodes inside that directory, just changes to the
1553 * names or unlinks in a directory.
1554 */
1555 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1556 struct btrfs_root *root,
1557 struct btrfs_path *path,
1558 struct extent_buffer *eb,
1559 struct btrfs_dir_item *di,
1560 struct btrfs_key *key)
1561 {
1562 char *name;
1563 int name_len;
1564 struct btrfs_dir_item *dst_di;
1565 struct btrfs_key found_key;
1566 struct btrfs_key log_key;
1567 struct inode *dir;
1568 u8 log_type;
1569 int exists;
1570 int ret = 0;
1571 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
1572
1573 dir = read_one_inode(root, key->objectid);
1574 if (!dir)
1575 return -EIO;
1576
1577 name_len = btrfs_dir_name_len(eb, di);
1578 name = kmalloc(name_len, GFP_NOFS);
1579 if (!name) {
1580 ret = -ENOMEM;
1581 goto out;
1582 }
1583
1584 log_type = btrfs_dir_type(eb, di);
1585 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1586 name_len);
1587
1588 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
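/* does the inode this name points to exist in the subvolume yet? */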
1589 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1590 if (exists == 0)
1591 exists = 1;
1592 else
1593 exists = 0;
1594 btrfs_release_path(path);
1595
1596 if (key->type == BTRFS_DIR_ITEM_KEY) {
1597 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1598 name, name_len, 1);
1599 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
1600 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1601 key->objectid,
1602 key->offset, name,
1603 name_len, 1);
1604 } else {
1605 /* Corruption */
1606 ret = -EINVAL;
1607 goto out;
1608 }
1609 if (IS_ERR_OR_NULL(dst_di)) {
1610 /* we need a sequence number to insert, so we only
1611 * do inserts for the BTRFS_DIR_INDEX_KEY types
1612 */
1613 if (key->type != BTRFS_DIR_INDEX_KEY)
1614 goto out;
1615 goto insert;
1616 }
1617
1618 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1619 /* the existing item matches the logged item */
1620 if (found_key.objectid == log_key.objectid &&
1621 found_key.type == log_key.type &&
1622 found_key.offset == log_key.offset &&
1623 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
1624 goto out;
1625 }
1626
1627 /*
1628 * don't drop the conflicting directory entry if the inode
1629 * for the new entry doesn't exist
1630 */
1631 if (!exists)
1632 goto out;
1633
1634 ret = drop_one_dir_item(trans, root, path, dir, dst_di);
1635 if (ret)
1636 goto out;
1637
1638 if (key->type == BTRFS_DIR_INDEX_KEY)
1639 goto insert;
1640 out:
1641 btrfs_release_path(path);
1642 if (!ret && update_size) {
1643 btrfs_i_size_write(dir, dir->i_size + name_len * 2);
1644 ret = btrfs_update_inode(trans, root, dir);
1645 }
1646 kfree(name);
1647 iput(dir);
1648 return ret;
1649
1650 insert:
1651 btrfs_release_path(path);
1652 ret = insert_one_name(trans, root, path, key->objectid, key->offset,
1653 name, name_len, log_type, &log_key);
1654 if (ret && ret != -ENOENT)
1655 goto out;
1656 update_size = false;
1657 ret = 0;
1658 goto out;
1659 }
1660
1661 /*
1662 * find all the names in a directory item and reconcile them into
1663 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
1664 * one name in a directory item, but the same code gets used for
1665 * both directory index types
1666 */
1667 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
1668 struct btrfs_root *root,
1669 struct btrfs_path *path,
1670 struct extent_buffer *eb, int slot,
1671 struct btrfs_key *key)
1672 {
1673 int ret;
1674 u32 item_size = btrfs_item_size_nr(eb, slot);
1675 struct btrfs_dir_item *di;
1676 int name_len;
1677 unsigned long ptr;
1678 unsigned long ptr_end;
1679
1680 ptr = btrfs_item_ptr_offset(eb, slot);
1681 ptr_end = ptr + item_size;
1682 while (ptr < ptr_end) {
1683 di = (struct btrfs_dir_item *)ptr;
1684 if (verify_dir_item(root, eb, di))
1685 return -EIO;
1686 name_len = btrfs_dir_name_len(eb, di);
1687 ret = replay_one_name(trans, root, path, eb, di, key);
1688 if (ret)
1689 return ret;
1690 ptr = (unsigned long)(di + 1);
1691 ptr += name_len;
1692 }
1693 return 0;
1694 }
1695
1696 /*
1697 * directory replay has two parts. There are the standard directory
1698 * items in the log copied from the subvolume, and range items
1699 * created in the log while the subvolume was logged.
1700 *
1701 * The range items tell us which parts of the key space the log
1702 * is authoritative for. During replay, if a key in the subvolume
1703 * directory is in a logged range item, but not actually in the log
1704 * that means it was deleted from the directory before the fsync
1705 * and should be removed.
1706 */
1707 static noinline int find_dir_range(struct btrfs_root *root,
1708 struct btrfs_path *path,
1709 u64 dirid, int key_type,
1710 u64 *start_ret, u64 *end_ret)
1711 {
1712 struct btrfs_key key;
1713 u64 found_end;
1714 struct btrfs_dir_log_item *item;
1715 int ret;
1716 int nritems;
1717
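/* (u64)-1 means the search already covered the whole keyspace */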
1718 if (*start_ret == (u64)-1)
1719 return 1;
1720
1721 key.objectid = dirid;
1722 key.type = key_type;
1723 key.offset = *start_ret;
1724
1725 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1726 if (ret < 0)
1727 goto out;
1728 if (ret > 0) {
1729 if (path->slots[0] == 0)
1730 goto out;
1731 path->slots[0]--;
1732 }
1733 if (ret != 0)
1734 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1735
1736 if (key.type != key_type || key.objectid != dirid) {
1737 ret = 1;
1738 goto next;
1739 }
1740 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1741 struct btrfs_dir_log_item);
1742 found_end = btrfs_dir_log_end(path->nodes[0], item);
1743
1744 if (*start_ret >= key.offset && *start_ret <= found_end) {
1745 ret = 0;
1746 *start_ret = key.offset;
1747 *end_ret = found_end;
1748 goto out;
1749 }
1750 ret = 1;
1751 next:
1752 /* check the next slot in the tree to see if it is a valid item */
1753 nritems = btrfs_header_nritems(path->nodes[0]);
1754 if (path->slots[0] >= nritems) {
1755 ret = btrfs_next_leaf(root, path);
1756 if (ret)
1757 goto out;
1758 } else {
1759 path->slots[0]++;
1760 }
1761
1762 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1763
1764 if (key.type != key_type || key.objectid != dirid) {
1765 ret = 1;
1766 goto out;
1767 }
1768 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1769 struct btrfs_dir_log_item);
1770 found_end = btrfs_dir_log_end(path->nodes[0], item);
1771 *start_ret = key.offset;
1772 *end_ret = found_end;
1773 ret = 0;
1774 out:
1775 btrfs_release_path(path);
1776 return ret;
1777 }
1778
1779 /*
1780 * this looks for a given directory item in the log. If the directory
1781 * item is not in the log, the item is removed and the inode it points
1782 * to is unlinked
1783 */
1784 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
1785 struct btrfs_root *root,
1786 struct btrfs_root *log,
1787 struct btrfs_path *path,
1788 struct btrfs_path *log_path,
1789 struct inode *dir,
1790 struct btrfs_key *dir_key)
1791 {
1792 int ret;
1793 struct extent_buffer *eb;
1794 int slot;
1795 u32 item_size;
1796 struct btrfs_dir_item *di;
1797 struct btrfs_dir_item *log_di;
1798 int name_len;
1799 unsigned long ptr;
1800 unsigned long ptr_end;
1801 char *name;
1802 struct inode *inode;
1803 struct btrfs_key location;
1804
1805 again:
1806 eb = path->nodes[0];
1807 slot = path->slots[0];
1808 item_size = btrfs_item_size_nr(eb, slot);
1809 ptr = btrfs_item_ptr_offset(eb, slot);
1810 ptr_end = ptr + item_size;
1811 while (ptr < ptr_end) {
1812 di = (struct btrfs_dir_item *)ptr;
1813 if (verify_dir_item(root, eb, di)) {
1814 ret = -EIO;
1815 goto out;
1816 }
1817
1818 name_len = btrfs_dir_name_len(eb, di);
1819 name = kmalloc(name_len, GFP_NOFS);
1820 if (!name) {
1821 ret = -ENOMEM;
1822 goto out;
1823 }
1824 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1825 name_len);
1826 log_di = NULL;
1827 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
1828 log_di = btrfs_lookup_dir_item(trans, log, log_path,
1829 dir_key->objectid,
1830 name, name_len, 0);
1831 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
1832 log_di = btrfs_lookup_dir_index_item(trans, log,
1833 log_path,
1834 dir_key->objectid,
1835 dir_key->offset,
1836 name, name_len, 0);
1837 }
1838 if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
1839 btrfs_dir_item_key_to_cpu(eb, di, &location);
1840 btrfs_release_path(path);
1841 btrfs_release_path(log_path);
1842 inode = read_one_inode(root, location.objectid);
1843 if (!inode) {
1844 kfree(name);
1845 return -EIO;
1846 }
1847
1848 ret = link_to_fixup_dir(trans, root,
1849 path, location.objectid);
1850 if (ret) {
1851 kfree(name);
1852 iput(inode);
1853 goto out;
1854 }
1855
1856 inc_nlink(inode);
1857 ret = btrfs_unlink_inode(trans, root, dir, inode,
1858 name, name_len);
1859 if (!ret)
1860 ret = btrfs_run_delayed_items(trans, root);
1861 kfree(name);
1862 iput(inode);
1863 if (ret)
1864 goto out;
1865
1866 /* there might still be more names under this key;
1867 * check and repeat if required
1868 */
1869 ret = btrfs_search_slot(NULL, root, dir_key, path,
1870 0, 0);
1871 if (ret == 0)
1872 goto again;
1873 ret = 0;
1874 goto out;
1875 } else if (IS_ERR(log_di)) {
1876 kfree(name);
1877 return PTR_ERR(log_di);
1878 }
1879 btrfs_release_path(log_path);
1880 kfree(name);
1881
1882 ptr = (unsigned long)(di + 1);
1883 ptr += name_len;
1884 }
1885 ret = 0;
1886 out:
1887 btrfs_release_path(path);
1888 btrfs_release_path(log_path);
1889 return ret;
1890 }
1891
1892 /*
1893 * deletion replay happens before we copy any new directory items
1894 * out of the log or out of backreferences from inodes. It
1895 * scans the log to find ranges of keys that the log is authoritative for,
1896 * and then scans the directory to find items in those ranges that are
1897 * not present in the log.
1898 *
1899 * Anything we don't find in the log is unlinked and removed from the
1900 * directory.
1901 */
1902 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
1903 struct btrfs_root *root,
1904 struct btrfs_root *log,
1905 struct btrfs_path *path,
1906 u64 dirid, int del_all)
1907 {
1908 u64 range_start;
1909 u64 range_end;
1910 int key_type = BTRFS_DIR_LOG_ITEM_KEY;
1911 int ret = 0;
1912 struct btrfs_key dir_key;
1913 struct btrfs_key found_key;
1914 struct btrfs_path *log_path;
1915 struct inode *dir;
1916
1917 dir_key.objectid = dirid;
1918 dir_key.type = BTRFS_DIR_ITEM_KEY;
1919 log_path = btrfs_alloc_path();
1920 if (!log_path)
1921 return -ENOMEM;
1922
1923 dir = read_one_inode(root, dirid);
1924 /* it isn't an error if the inode isn't there, that can happen
1925 * because we replay the deletes before we copy in the inode item
1926 * from the log
1927 */
1928 if (!dir) {
1929 btrfs_free_path(log_path);
1930 return 0;
1931 }
1932 again:
1933 range_start = 0;
1934 range_end = 0;
1935 while (1) {
1936 if (del_all)
1937 range_end = (u64)-1;
1938 else {
1939 ret = find_dir_range(log, path, dirid, key_type,
1940 &range_start, &range_end);
1941 if (ret != 0)
1942 break;
1943 }
1944
1945 dir_key.offset = range_start;
1946 while (1) {
1947 int nritems;
1948 ret = btrfs_search_slot(NULL, root, &dir_key, path,
1949 0, 0);
1950 if (ret < 0)
1951 goto out;
1952
1953 nritems = btrfs_header_nritems(path->nodes[0]);
1954 if (path->slots[0] >= nritems) {
1955 ret = btrfs_next_leaf(root, path);
1956 if (ret)
1957 break;
1958 }
1959 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1960 path->slots[0]);
1961 if (found_key.objectid != dirid ||
1962 found_key.type != dir_key.type)
1963 goto next_type;
1964
1965 if (found_key.offset > range_end)
1966 break;
1967
1968 ret = check_item_in_log(trans, root, log, path,
1969 log_path, dir,
1970 &found_key);
1971 if (ret)
1972 goto out;
1973 if (found_key.offset == (u64)-1)
1974 break;
1975 dir_key.offset = found_key.offset + 1;
1976 }
1977 btrfs_release_path(path);
1978 if (range_end == (u64)-1)
1979 break;
1980 range_start = range_end + 1;
1981 }
1982
1983 next_type:
1984 ret = 0;
1985 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
1986 key_type = BTRFS_DIR_LOG_INDEX_KEY;
1987 dir_key.type = BTRFS_DIR_INDEX_KEY;
1988 btrfs_release_path(path);
1989 goto again;
1990 }
1991 out:
1992 btrfs_release_path(path);
1993 btrfs_free_path(log_path);
1994 iput(dir);
1995 return ret;
1996 }
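/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): suppose the log holds a DIR_LOG_ITEM for dirid 256 with key
 * offset 0 and dir_log_end 100.  The log is then authoritative for
 * directory keys (256, DIR_ITEM, 0..100): any DIR_ITEM the subvolume
 * holds in that offset range that is missing from the log must have
 * been unlinked during the logged transaction, so replay_dir_deletes()
 * unlinks it again during recovery.
 */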
1997
1998 /*
1999 * the process_func used to replay items from the log tree. This
2000 * gets called in two different stages. The first stage just looks
2001 * for inodes and makes sure they are all copied into the subvolume.
2002 *
2003 * The second stage copies all the other item types from the log into
2004 * the subvolume. The two-stage approach is slower, but gets rid of
2005 * lots of complexity around inodes referencing other inodes that exist
2006 * only in the log (references come from either directory items or inode
2007 * back refs).
2008 */
2009 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2010 struct walk_control *wc, u64 gen)
2011 {
2012 int nritems;
2013 struct btrfs_path *path;
2014 struct btrfs_root *root = wc->replay_dest;
2015 struct btrfs_key key;
2016 int level;
2017 int i;
2018 int ret;
2019
2020 ret = btrfs_read_buffer(eb, gen);
2021 if (ret)
2022 return ret;
2023
2024 level = btrfs_header_level(eb);
2025
2026 if (level != 0)
2027 return 0;
2028
2029 path = btrfs_alloc_path();
2030 if (!path)
2031 return -ENOMEM;
2032
2033 nritems = btrfs_header_nritems(eb);
2034 for (i = 0; i < nritems; i++) {
2035 btrfs_item_key_to_cpu(eb, &key, i);
2036
2037 /* inode keys are done during the first stage */
2038 if (key.type == BTRFS_INODE_ITEM_KEY &&
2039 wc->stage == LOG_WALK_REPLAY_INODES) {
2040 struct btrfs_inode_item *inode_item;
2041 u32 mode;
2042
2043 inode_item = btrfs_item_ptr(eb, i,
2044 struct btrfs_inode_item);
2045 mode = btrfs_inode_mode(eb, inode_item);
2046 if (S_ISDIR(mode)) {
2047 ret = replay_dir_deletes(wc->trans,
2048 root, log, path, key.objectid, 0);
2049 if (ret)
2050 break;
2051 }
2052 ret = overwrite_item(wc->trans, root, path,
2053 eb, i, &key);
2054 if (ret)
2055 break;
2056
2057 /* for regular files, make sure the corresponding
2058 * orphan item exists. Extents past the new EOF
2059 * will be truncated later by orphan cleanup.
2060 */
2061 if (S_ISREG(mode)) {
2062 ret = insert_orphan_item(wc->trans, root,
2063 key.objectid);
2064 if (ret)
2065 break;
2066 }
2067
2068 ret = link_to_fixup_dir(wc->trans, root,
2069 path, key.objectid);
2070 if (ret)
2071 break;
2072 }
2073
2074 if (key.type == BTRFS_DIR_INDEX_KEY &&
2075 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2076 ret = replay_one_dir_item(wc->trans, root, path,
2077 eb, i, &key);
2078 if (ret)
2079 break;
2080 }
2081
2082 if (wc->stage < LOG_WALK_REPLAY_ALL)
2083 continue;
2084
2085 /* these keys are simply copied */
2086 if (key.type == BTRFS_XATTR_ITEM_KEY) {
2087 ret = overwrite_item(wc->trans, root, path,
2088 eb, i, &key);
2089 if (ret)
2090 break;
2091 } else if (key.type == BTRFS_INODE_REF_KEY ||
2092 key.type == BTRFS_INODE_EXTREF_KEY) {
2093 ret = add_inode_ref(wc->trans, root, log, path,
2094 eb, i, &key);
2095 if (ret && ret != -ENOENT)
2096 break;
2097 ret = 0;
2098 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2099 ret = replay_one_extent(wc->trans, root, path,
2100 eb, i, &key);
2101 if (ret)
2102 break;
2103 } else if (key.type == BTRFS_DIR_ITEM_KEY) {
2104 ret = replay_one_dir_item(wc->trans, root, path,
2105 eb, i, &key);
2106 if (ret)
2107 break;
2108 }
2109 }
2110 btrfs_free_path(path);
2111 return ret;
2112 }
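/*
 * Editorial note (sketch of the stage gating above): for a leaf that
 * carries an inode item, a dir index item and an extent item, the walk
 * visits it once per stage and each pass picks up only its own keys:
 *
 *   LOG_WALK_REPLAY_INODES     -> BTRFS_INODE_ITEM_KEY
 *   LOG_WALK_REPLAY_DIR_INDEX  -> BTRFS_DIR_INDEX_KEY
 *   LOG_WALK_REPLAY_ALL        -> xattrs, refs, extents, dir items
 *
 * so by the time references are replayed, every inode they point at
 * already exists in the subvolume.
 */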
2113
2114 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2115 struct btrfs_root *root,
2116 struct btrfs_path *path, int *level,
2117 struct walk_control *wc)
2118 {
2119 u64 root_owner;
2120 u64 bytenr;
2121 u64 ptr_gen;
2122 struct extent_buffer *next;
2123 struct extent_buffer *cur;
2124 struct extent_buffer *parent;
2125 u32 blocksize;
2126 int ret = 0;
2127
2128 WARN_ON(*level < 0);
2129 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2130
2131 while (*level > 0) {
2132 WARN_ON(*level < 0);
2133 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2134 cur = path->nodes[*level];
2135
2136 WARN_ON(btrfs_header_level(cur) != *level);
2137
2138 if (path->slots[*level] >=
2139 btrfs_header_nritems(cur))
2140 break;
2141
2142 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2143 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2144 blocksize = btrfs_level_size(root, *level - 1);
2145
2146 parent = path->nodes[*level];
2147 root_owner = btrfs_header_owner(parent);
2148
2149 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
2150 if (!next)
2151 return -ENOMEM;
2152
2153 if (*level == 1) {
2154 ret = wc->process_func(root, next, wc, ptr_gen);
2155 if (ret) {
2156 free_extent_buffer(next);
2157 return ret;
2158 }
2159
2160 path->slots[*level]++;
2161 if (wc->free) {
2162 ret = btrfs_read_buffer(next, ptr_gen);
2163 if (ret) {
2164 free_extent_buffer(next);
2165 return ret;
2166 }
2167
2168 if (trans) {
2169 btrfs_tree_lock(next);
2170 btrfs_set_lock_blocking(next);
2171 clean_tree_block(trans, root, next);
2172 btrfs_wait_tree_block_writeback(next);
2173 btrfs_tree_unlock(next);
2174 }
2175
2176 WARN_ON(root_owner !=
2177 BTRFS_TREE_LOG_OBJECTID);
2178 ret = btrfs_free_and_pin_reserved_extent(root,
2179 bytenr, blocksize);
2180 if (ret) {
2181 free_extent_buffer(next);
2182 return ret;
2183 }
2184 }
2185 free_extent_buffer(next);
2186 continue;
2187 }
2188 ret = btrfs_read_buffer(next, ptr_gen);
2189 if (ret) {
2190 free_extent_buffer(next);
2191 return ret;
2192 }
2193
2194 WARN_ON(*level <= 0);
2195 if (path->nodes[*level-1])
2196 free_extent_buffer(path->nodes[*level-1]);
2197 path->nodes[*level-1] = next;
2198 *level = btrfs_header_level(next);
2199 path->slots[*level] = 0;
2200 cond_resched();
2201 }
2202 WARN_ON(*level < 0);
2203 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2204
2205 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2206
2207 cond_resched();
2208 return 0;
2209 }
2210
2211 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2212 struct btrfs_root *root,
2213 struct btrfs_path *path, int *level,
2214 struct walk_control *wc)
2215 {
2216 u64 root_owner;
2217 int i;
2218 int slot;
2219 int ret;
2220
2221 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2222 slot = path->slots[i];
2223 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2224 path->slots[i]++;
2225 *level = i;
2226 WARN_ON(*level == 0);
2227 return 0;
2228 } else {
2229 struct extent_buffer *parent;
2230 if (path->nodes[*level] == root->node)
2231 parent = path->nodes[*level];
2232 else
2233 parent = path->nodes[*level + 1];
2234
2235 root_owner = btrfs_header_owner(parent);
2236 ret = wc->process_func(root, path->nodes[*level], wc,
2237 btrfs_header_generation(path->nodes[*level]));
2238 if (ret)
2239 return ret;
2240
2241 if (wc->free) {
2242 struct extent_buffer *next;
2243
2244 next = path->nodes[*level];
2245
2246 if (trans) {
2247 btrfs_tree_lock(next);
2248 btrfs_set_lock_blocking(next);
2249 clean_tree_block(trans, root, next);
2250 btrfs_wait_tree_block_writeback(next);
2251 btrfs_tree_unlock(next);
2252 }
2253
2254 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
2255 ret = btrfs_free_and_pin_reserved_extent(root,
2256 path->nodes[*level]->start,
2257 path->nodes[*level]->len);
2258 if (ret)
2259 return ret;
2260 }
2261 free_extent_buffer(path->nodes[*level]);
2262 path->nodes[*level] = NULL;
2263 *level = i + 1;
2264 }
2265 }
2266 return 1;
2267 }
2268
2269 /*
2270 * drop the reference count on the tree rooted at 'log'. This traverses
2271 * the tree freeing any blocks that have a ref count of zero after being
2272 * decremented.
2273 */
2274 static int walk_log_tree(struct btrfs_trans_handle *trans,
2275 struct btrfs_root *log, struct walk_control *wc)
2276 {
2277 int ret = 0;
2278 int wret;
2279 int level;
2280 struct btrfs_path *path;
2281 int orig_level;
2282
2283 path = btrfs_alloc_path();
2284 if (!path)
2285 return -ENOMEM;
2286
2287 level = btrfs_header_level(log->node);
2288 orig_level = level;
2289 path->nodes[level] = log->node;
2290 extent_buffer_get(log->node);
2291 path->slots[level] = 0;
2292
2293 while (1) {
2294 wret = walk_down_log_tree(trans, log, path, &level, wc);
2295 if (wret > 0)
2296 break;
2297 if (wret < 0) {
2298 ret = wret;
2299 goto out;
2300 }
2301
2302 wret = walk_up_log_tree(trans, log, path, &level, wc);
2303 if (wret > 0)
2304 break;
2305 if (wret < 0) {
2306 ret = wret;
2307 goto out;
2308 }
2309 }
2310
2311 /* was the root node processed? if not, catch it here */
2312 if (path->nodes[orig_level]) {
2313 ret = wc->process_func(log, path->nodes[orig_level], wc,
2314 btrfs_header_generation(path->nodes[orig_level]));
2315 if (ret)
2316 goto out;
2317 if (wc->free) {
2318 struct extent_buffer *next;
2319
2320 next = path->nodes[orig_level];
2321
2322 if (trans) {
2323 btrfs_tree_lock(next);
2324 btrfs_set_lock_blocking(next);
2325 clean_tree_block(trans, log, next);
2326 btrfs_wait_tree_block_writeback(next);
2327 btrfs_tree_unlock(next);
2328 }
2329
2330 WARN_ON(log->root_key.objectid !=
2331 BTRFS_TREE_LOG_OBJECTID);
2332 ret = btrfs_free_and_pin_reserved_extent(log, next->start,
2333 next->len);
2334 if (ret)
2335 goto out;
2336 }
2337 }
2338
2339 out:
2340 btrfs_free_path(path);
2341 return ret;
2342 }
2343
2344 /*
2345 * helper function to update the item for a given subvolumes log root
2346 * in the tree of log roots
2347 */
2348 static int update_log_root(struct btrfs_trans_handle *trans,
2349 struct btrfs_root *log)
2350 {
2351 int ret;
2352
2353 if (log->log_transid == 1) {
2354 /* insert root item on the first sync */
2355 ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
2356 &log->root_key, &log->root_item);
2357 } else {
2358 ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
2359 &log->root_key, &log->root_item);
2360 }
2361 return ret;
2362 }
2363
2364 static int wait_log_commit(struct btrfs_trans_handle *trans,
2365 struct btrfs_root *root, unsigned long transid)
2366 {
2367 DEFINE_WAIT(wait);
2368 int index = transid % 2;
2369 int ret = 0;
2370
2371 /*
2372 * we only allow two pending log transactions at a time,
2373 * so we know that if ours is at least 2 older than the
2374 * current log transaction, we're done
2375 */
2376 do {
2377 if (ACCESS_ONCE(root->fs_info->last_trans_log_full_commit) ==
2378 trans->transid) {
2379 ret = -EAGAIN;
2380 break;
2381 }
2382
2383 prepare_to_wait(&root->log_commit_wait[index],
2384 &wait, TASK_UNINTERRUPTIBLE);
2385 mutex_unlock(&root->log_mutex);
2386
2387 if (root->log_transid < transid + 2 &&
2388 atomic_read(&root->log_commit[index]))
2389 schedule();
2390
2391 finish_wait(&root->log_commit_wait[index], &wait);
2392 mutex_lock(&root->log_mutex);
2393 } while (root->log_transid < transid + 2 &&
2394 atomic_read(&root->log_commit[index]));
2395
2396 return ret;
2397 }
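/*
 * Worked example (editorial, following the parity scheme above): with
 * log transids 10 and 11 in flight, transid 10 uses log_commit[0] and
 * transid 11 uses log_commit[1].  A waiter for transid 10 can return
 * once root->log_transid reaches 12, because slot 10 % 2 == 0 must
 * have been handed to transid 12 by then, so commit 10 is finished.
 */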
2398
2399 static void wait_for_writer(struct btrfs_trans_handle *trans,
2400 struct btrfs_root *root)
2401 {
2402 DEFINE_WAIT(wait);
2403 while (ACCESS_ONCE(root->fs_info->last_trans_log_full_commit) !=
2404 trans->transid && atomic_read(&root->log_writers)) {
2405 prepare_to_wait(&root->log_writer_wait,
2406 &wait, TASK_UNINTERRUPTIBLE);
2407 mutex_unlock(&root->log_mutex);
2408 if (ACCESS_ONCE(root->fs_info->last_trans_log_full_commit) !=
2409 trans->transid && atomic_read(&root->log_writers))
2410 schedule();
2411 mutex_lock(&root->log_mutex);
2412 finish_wait(&root->log_writer_wait, &wait);
2413 }
2414 }
2415
2416 /*
2417 * btrfs_sync_log sends a given tree log down to the disk and
2418 * updates the super blocks to record it. When this call is done,
2419 * you know that any inodes previously logged are safely on disk only
2420 * if it returns 0.
2421 *
2422 * Any other return value means you need to call btrfs_commit_transaction.
2423 * Some of the edge cases for fsyncing directories that have had unlinks
2424 * or renames done in the past mean that sometimes the only safe
2425 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
2426 * that has happened.
2427 */
2428 int btrfs_sync_log(struct btrfs_trans_handle *trans,
2429 struct btrfs_root *root)
2430 {
2431 int index1;
2432 int index2;
2433 int mark;
2434 int ret;
2435 struct btrfs_root *log = root->log_root;
2436 struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
2437 unsigned long log_transid = 0;
2438 struct blk_plug plug;
2439
2440 mutex_lock(&root->log_mutex);
2441 log_transid = root->log_transid;
2442 index1 = root->log_transid % 2;
2443 if (atomic_read(&root->log_commit[index1])) {
2444 ret = wait_log_commit(trans, root, root->log_transid);
2445 mutex_unlock(&root->log_mutex);
2446 return ret;
2447 }
2448 atomic_set(&root->log_commit[index1], 1);
2449
2450 /* wait for previous tree log sync to complete */
2451 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
2452 wait_log_commit(trans, root, root->log_transid - 1);
2453
2454 while (1) {
2455 int batch = atomic_read(&root->log_batch);
2456 /* when we're on an ssd, just kick the log commit out */
2457 if (!btrfs_test_opt(root, SSD) && root->log_multiple_pids) {
2458 mutex_unlock(&root->log_mutex);
2459 schedule_timeout_uninterruptible(1);
2460 mutex_lock(&root->log_mutex);
2461 }
2462 wait_for_writer(trans, root);
2463 if (batch == atomic_read(&root->log_batch))
2464 break;
2465 }
2466
2467 /* bail out if we need to do a full commit */
2468 if (ACCESS_ONCE(root->fs_info->last_trans_log_full_commit) ==
2469 trans->transid) {
2470 ret = -EAGAIN;
2471 btrfs_free_logged_extents(log, log_transid);
2472 mutex_unlock(&root->log_mutex);
2473 goto out;
2474 }
2475
2476 if (log_transid % 2 == 0)
2477 mark = EXTENT_DIRTY;
2478 else
2479 mark = EXTENT_NEW;
2480
2481 /* we start IO on all the marked extents here, but we don't actually
2482 * wait for them until later.
2483 */
2484 blk_start_plug(&plug);
2485 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
2486 if (ret) {
2487 blk_finish_plug(&plug);
2488 btrfs_abort_transaction(trans, root, ret);
2489 btrfs_free_logged_extents(log, log_transid);
2490 mutex_unlock(&root->log_mutex);
2491 goto out;
2492 }
2493
2494 btrfs_set_root_node(&log->root_item, log->node);
2495
2496 root->log_transid++;
2497 log->log_transid = root->log_transid;
2498 root->log_start_pid = 0;
2499 /*
2500 * IO has been started, blocks of the log tree have WRITTEN flag set
2501 * in their headers. New modifications of the log will be written to
2502 * new positions, so it's safe to allow log writers to go in.
2503 */
2504 mutex_unlock(&root->log_mutex);
2505
2506 mutex_lock(&log_root_tree->log_mutex);
2507 atomic_inc(&log_root_tree->log_batch);
2508 atomic_inc(&log_root_tree->log_writers);
2509 mutex_unlock(&log_root_tree->log_mutex);
2510
2511 ret = update_log_root(trans, log);
2512
2513 mutex_lock(&log_root_tree->log_mutex);
2514 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
2515 smp_mb();
2516 if (waitqueue_active(&log_root_tree->log_writer_wait))
2517 wake_up(&log_root_tree->log_writer_wait);
2518 }
2519
2520 if (ret) {
2521 blk_finish_plug(&plug);
2522 if (ret != -ENOSPC) {
2523 btrfs_abort_transaction(trans, root, ret);
2524 mutex_unlock(&log_root_tree->log_mutex);
2525 goto out;
2526 }
2527 ACCESS_ONCE(root->fs_info->last_trans_log_full_commit) =
2528 trans->transid;
2529 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2530 btrfs_free_logged_extents(log, log_transid);
2531 mutex_unlock(&log_root_tree->log_mutex);
2532 ret = -EAGAIN;
2533 goto out;
2534 }
2535
2536 index2 = log_root_tree->log_transid % 2;
2537 if (atomic_read(&log_root_tree->log_commit[index2])) {
2538 blk_finish_plug(&plug);
2539 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2540 ret = wait_log_commit(trans, log_root_tree,
2541 log_root_tree->log_transid);
2542 btrfs_free_logged_extents(log, log_transid);
2543 mutex_unlock(&log_root_tree->log_mutex);
2544 goto out;
2545 }
2546 atomic_set(&log_root_tree->log_commit[index2], 1);
2547
2548 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
2549 wait_log_commit(trans, log_root_tree,
2550 log_root_tree->log_transid - 1);
2551 }
2552
2553 wait_for_writer(trans, log_root_tree);
2554
2555 /*
2556 * now that we've moved on to the tree of log tree roots,
2557 * check the full commit flag again
2558 */
2559 if (ACCESS_ONCE(root->fs_info->last_trans_log_full_commit) ==
2560 trans->transid) {
2561 blk_finish_plug(&plug);
2562 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2563 btrfs_free_logged_extents(log, log_transid);
2564 mutex_unlock(&log_root_tree->log_mutex);
2565 ret = -EAGAIN;
2566 goto out_wake_log_root;
2567 }
2568
2569 ret = btrfs_write_marked_extents(log_root_tree,
2570 &log_root_tree->dirty_log_pages,
2571 EXTENT_DIRTY | EXTENT_NEW);
2572 blk_finish_plug(&plug);
2573 if (ret) {
2574 btrfs_abort_transaction(trans, root, ret);
2575 btrfs_free_logged_extents(log, log_transid);
2576 mutex_unlock(&log_root_tree->log_mutex);
2577 goto out_wake_log_root;
2578 }
2579 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2580 btrfs_wait_marked_extents(log_root_tree,
2581 &log_root_tree->dirty_log_pages,
2582 EXTENT_NEW | EXTENT_DIRTY);
2583 btrfs_wait_logged_extents(log, log_transid);
2584
2585 btrfs_set_super_log_root(root->fs_info->super_for_commit,
2586 log_root_tree->node->start);
2587 btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
2588 btrfs_header_level(log_root_tree->node));
2589
2590 log_root_tree->log_transid++;
2591 mutex_unlock(&log_root_tree->log_mutex);
2592
2593 /*
2594 * nobody else is going to jump in and write the ctree
2595 * super here because the log_commit atomic below is protecting
2596 * us. We must be called with a transaction handle pinning
2597 * the running transaction open, so a full commit can't hop
2598 * in and cause problems either.
2599 */
2600 ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
2601 if (ret) {
2602 btrfs_abort_transaction(trans, root, ret);
2603 goto out_wake_log_root;
2604 }
2605
2606 mutex_lock(&root->log_mutex);
2607 if (root->last_log_commit < log_transid)
2608 root->last_log_commit = log_transid;
2609 mutex_unlock(&root->log_mutex);
2610
2611 out_wake_log_root:
2612 atomic_set(&log_root_tree->log_commit[index2], 0);
2613 smp_mb();
2614 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
2615 wake_up(&log_root_tree->log_commit_wait[index2]);
2616 out:
2617 atomic_set(&root->log_commit[index1], 0);
2618 smp_mb();
2619 if (waitqueue_active(&root->log_commit_wait[index1]))
2620 wake_up(&root->log_commit_wait[index1]);
2621 return ret;
2622 }
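/*
 * Hedged usage sketch (editorial; the actual caller lives in the fsync
 * path, e.g. btrfs_sync_file()): any nonzero return is treated as
 * "fall back to a full transaction commit":
 *
 *	ret = btrfs_sync_log(trans, root);
 *	if (ret == 0)
 *		ret = btrfs_end_transaction(trans, root);
 *	else
 *		ret = btrfs_commit_transaction(trans, root);
 */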
2623
2624 static void free_log_tree(struct btrfs_trans_handle *trans,
2625 struct btrfs_root *log)
2626 {
2627 int ret;
2628 u64 start;
2629 u64 end;
2630 struct walk_control wc = {
2631 .free = 1,
2632 .process_func = process_one_buffer
2633 };
2634
2635 ret = walk_log_tree(trans, log, &wc);
2636 /* I don't think this can happen but just in case */
2637 if (ret)
2638 btrfs_abort_transaction(trans, log, ret);
2639
2640 while (1) {
2641 ret = find_first_extent_bit(&log->dirty_log_pages,
2642 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
2643 NULL);
2644 if (ret)
2645 break;
2646
2647 clear_extent_bits(&log->dirty_log_pages, start, end,
2648 EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
2649 }
2650
2651 /*
2652 * We may have short-circuited the log tree with the full commit logic
2653 * and left ordered extents on our list, so clear these out to keep us
2654 * from leaking inodes and memory.
2655 */
2656 btrfs_free_logged_extents(log, 0);
2657 btrfs_free_logged_extents(log, 1);
2658
2659 free_extent_buffer(log->node);
2660 kfree(log);
2661 }
2662
2663 /*
2664 * free all the extents used by the tree log. This should be called
2665 * at commit time of the full transaction
2666 */
2667 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
2668 {
2669 if (root->log_root) {
2670 free_log_tree(trans, root->log_root);
2671 root->log_root = NULL;
2672 }
2673 return 0;
2674 }
2675
2676 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
2677 struct btrfs_fs_info *fs_info)
2678 {
2679 if (fs_info->log_root_tree) {
2680 free_log_tree(trans, fs_info->log_root_tree);
2681 fs_info->log_root_tree = NULL;
2682 }
2683 return 0;
2684 }
2685
2686 /*
2687 * If both a file and directory are logged, and unlinks or renames are
2688 * mixed in, we have a few interesting corners:
2689 *
2690 * create file X in dir Y
2691 * link file X to X.link in dir Y
2692 * fsync file X
2693 * unlink file X but leave X.link
2694 * fsync dir Y
2695 *
2696 * After a crash we would expect only X.link to exist. But file X
2697 * didn't get fsync'd again so the log has back refs for X and X.link.
2698 *
2699 * We solve this by removing directory entries and inode backrefs from the
2700 * log when a file that was logged in the current transaction is
2701 * unlinked. Any later fsync will include the updated log entries, and
2702 * we'll be able to reconstruct the proper directory items from backrefs.
2703 *
2704 * This optimization allows us to avoid relogging the entire inode
2705 * or the entire directory.
2706 */
2707 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2708 struct btrfs_root *root,
2709 const char *name, int name_len,
2710 struct inode *dir, u64 index)
2711 {
2712 struct btrfs_root *log;
2713 struct btrfs_dir_item *di;
2714 struct btrfs_path *path;
2715 int ret;
2716 int err = 0;
2717 int bytes_del = 0;
2718 u64 dir_ino = btrfs_ino(dir);
2719
2720 if (BTRFS_I(dir)->logged_trans < trans->transid)
2721 return 0;
2722
2723 ret = join_running_log_trans(root);
2724 if (ret)
2725 return 0;
2726
2727 mutex_lock(&BTRFS_I(dir)->log_mutex);
2728
2729 log = root->log_root;
2730 path = btrfs_alloc_path();
2731 if (!path) {
2732 err = -ENOMEM;
2733 goto out_unlock;
2734 }
2735
2736 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
2737 name, name_len, -1);
2738 if (IS_ERR(di)) {
2739 err = PTR_ERR(di);
2740 goto fail;
2741 }
2742 if (di) {
2743 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2744 bytes_del += name_len;
2745 if (ret) {
2746 err = ret;
2747 goto fail;
2748 }
2749 }
2750 btrfs_release_path(path);
2751 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
2752 index, name, name_len, -1);
2753 if (IS_ERR(di)) {
2754 err = PTR_ERR(di);
2755 goto fail;
2756 }
2757 if (di) {
2758 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2759 bytes_del += name_len;
2760 if (ret) {
2761 err = ret;
2762 goto fail;
2763 }
2764 }
2765
2766 /* update the directory size in the log to reflect the names
2767 * we have removed
2768 */
2769 if (bytes_del) {
2770 struct btrfs_key key;
2771
2772 key.objectid = dir_ino;
2773 key.offset = 0;
2774 key.type = BTRFS_INODE_ITEM_KEY;
2775 btrfs_release_path(path);
2776
2777 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
2778 if (ret < 0) {
2779 err = ret;
2780 goto fail;
2781 }
2782 if (ret == 0) {
2783 struct btrfs_inode_item *item;
2784 u64 i_size;
2785
2786 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2787 struct btrfs_inode_item);
2788 i_size = btrfs_inode_size(path->nodes[0], item);
2789 if (i_size > bytes_del)
2790 i_size -= bytes_del;
2791 else
2792 i_size = 0;
2793 btrfs_set_inode_size(path->nodes[0], item, i_size);
2794 btrfs_mark_buffer_dirty(path->nodes[0]);
2795 } else
2796 ret = 0;
2797 btrfs_release_path(path);
2798 }
2799 fail:
2800 btrfs_free_path(path);
2801 out_unlock:
2802 mutex_unlock(&BTRFS_I(dir)->log_mutex);
2803 if (ret == -ENOSPC) {
2804 root->fs_info->last_trans_log_full_commit = trans->transid;
2805 ret = 0;
2806 } else if (ret < 0)
2807 btrfs_abort_transaction(trans, root, ret);
2808
2809 btrfs_end_log_trans(root);
2810
2811 return err;
2812 }
2813
2814 /* see comments for btrfs_del_dir_entries_in_log */
2815 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
2816 struct btrfs_root *root,
2817 const char *name, int name_len,
2818 struct inode *inode, u64 dirid)
2819 {
2820 struct btrfs_root *log;
2821 u64 index;
2822 int ret;
2823
2824 if (BTRFS_I(inode)->logged_trans < trans->transid)
2825 return 0;
2826
2827 ret = join_running_log_trans(root);
2828 if (ret)
2829 return 0;
2830 log = root->log_root;
2831 mutex_lock(&BTRFS_I(inode)->log_mutex);
2832
2833 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
2834 dirid, &index);
2835 mutex_unlock(&BTRFS_I(inode)->log_mutex);
2836 if (ret == -ENOSPC) {
2837 root->fs_info->last_trans_log_full_commit = trans->transid;
2838 ret = 0;
2839 } else if (ret < 0 && ret != -ENOENT)
2840 btrfs_abort_transaction(trans, root, ret);
2841 btrfs_end_log_trans(root);
2842
2843 return ret;
2844 }
2845
2846 /*
2847 * creates a range item in the log for 'dirid'. first_offset and
2848 * last_offset tell us which parts of the key space the log should
2849 * be considered authoritative for.
2850 */
2851 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
2852 struct btrfs_root *log,
2853 struct btrfs_path *path,
2854 int key_type, u64 dirid,
2855 u64 first_offset, u64 last_offset)
2856 {
2857 int ret;
2858 struct btrfs_key key;
2859 struct btrfs_dir_log_item *item;
2860
2861 key.objectid = dirid;
2862 key.offset = first_offset;
2863 if (key_type == BTRFS_DIR_ITEM_KEY)
2864 key.type = BTRFS_DIR_LOG_ITEM_KEY;
2865 else
2866 key.type = BTRFS_DIR_LOG_INDEX_KEY;
2867 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
2868 if (ret)
2869 return ret;
2870
2871 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2872 struct btrfs_dir_log_item);
2873 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
2874 btrfs_mark_buffer_dirty(path->nodes[0]);
2875 btrfs_release_path(path);
2876 return 0;
2877 }
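/*
 * Illustrative example (editorial): insert_dir_log_key(trans, log,
 * path, BTRFS_DIR_INDEX_KEY, 256, 2, 10) stores a
 * BTRFS_DIR_LOG_INDEX_KEY item at (256, 2) whose dir_log_end is 10,
 * declaring the log authoritative for dir index offsets 2 through 10
 * of directory 256.  find_dir_range() reads these items back during
 * replay to drive the deletion scan.
 */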
2878
2879 /*
2880 * log all the items included in the current transaction for a given
2881 * directory. This also creates the range items in the log tree required
2882 * to replay anything deleted before the fsync
2883 */
2884 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2885 struct btrfs_root *root, struct inode *inode,
2886 struct btrfs_path *path,
2887 struct btrfs_path *dst_path, int key_type,
2888 u64 min_offset, u64 *last_offset_ret)
2889 {
2890 struct btrfs_key min_key;
2891 struct btrfs_root *log = root->log_root;
2892 struct extent_buffer *src;
2893 int err = 0;
2894 int ret;
2895 int i;
2896 int nritems;
2897 u64 first_offset = min_offset;
2898 u64 last_offset = (u64)-1;
2899 u64 ino = btrfs_ino(inode);
2900
2901 log = root->log_root;
2902
2903 min_key.objectid = ino;
2904 min_key.type = key_type;
2905 min_key.offset = min_offset;
2906
2907 path->keep_locks = 1;
2908
2909 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
2910
2911 /*
2912 * we didn't find anything from this transaction, see if there
2913 * is anything at all
2914 */
2915 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
2916 min_key.objectid = ino;
2917 min_key.type = key_type;
2918 min_key.offset = (u64)-1;
2919 btrfs_release_path(path);
2920 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
2921 if (ret < 0) {
2922 btrfs_release_path(path);
2923 return ret;
2924 }
2925 ret = btrfs_previous_item(root, path, ino, key_type);
2926
2927 /* if ret == 0 there are items for this type,
2928 * create a range to tell us the last key of this type.
2929 * otherwise, there are no items in this directory after
2930 * *min_offset, and we create a range to indicate that.
2931 */
2932 if (ret == 0) {
2933 struct btrfs_key tmp;
2934 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
2935 path->slots[0]);
2936 if (key_type == tmp.type)
2937 first_offset = max(min_offset, tmp.offset) + 1;
2938 }
2939 goto done;
2940 }
2941
2942 /* go backward to find any previous key */
2943 ret = btrfs_previous_item(root, path, ino, key_type);
2944 if (ret == 0) {
2945 struct btrfs_key tmp;
2946 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
2947 if (key_type == tmp.type) {
2948 first_offset = tmp.offset;
2949 ret = overwrite_item(trans, log, dst_path,
2950 path->nodes[0], path->slots[0],
2951 &tmp);
2952 if (ret) {
2953 err = ret;
2954 goto done;
2955 }
2956 }
2957 }
2958 btrfs_release_path(path);
2959
2960 /* find the first key from this transaction again */
2961 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
2962 if (WARN_ON(ret != 0))
2963 goto done;
2964
2965 /*
2966 * we have a block from this transaction, log every item in it
2967 * from our directory
2968 */
2969 while (1) {
2970 struct btrfs_key tmp;
2971 src = path->nodes[0];
2972 nritems = btrfs_header_nritems(src);
2973 for (i = path->slots[0]; i < nritems; i++) {
2974 btrfs_item_key_to_cpu(src, &min_key, i);
2975
2976 if (min_key.objectid != ino || min_key.type != key_type)
2977 goto done;
2978 ret = overwrite_item(trans, log, dst_path, src, i,
2979 &min_key);
2980 if (ret) {
2981 err = ret;
2982 goto done;
2983 }
2984 }
2985 path->slots[0] = nritems;
2986
2987 /*
2988 * look ahead to the next item and see if it is also
2989 * from this directory and from this transaction
2990 */
2991 ret = btrfs_next_leaf(root, path);
2992 if (ret == 1) {
2993 last_offset = (u64)-1;
2994 goto done;
2995 }
2996 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
2997 if (tmp.objectid != ino || tmp.type != key_type) {
2998 last_offset = (u64)-1;
2999 goto done;
3000 }
3001 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3002 ret = overwrite_item(trans, log, dst_path,
3003 path->nodes[0], path->slots[0],
3004 &tmp);
3005 if (ret)
3006 err = ret;
3007 else
3008 last_offset = tmp.offset;
3009 goto done;
3010 }
3011 }
3012 done:
3013 btrfs_release_path(path);
3014 btrfs_release_path(dst_path);
3015
3016 if (err == 0) {
3017 *last_offset_ret = last_offset;
3018 /*
3019 * insert the log range keys to indicate where the log
3020 * is valid
3021 */
3022 ret = insert_dir_log_key(trans, log, path, key_type,
3023 ino, first_offset, last_offset);
3024 if (ret)
3025 err = ret;
3026 }
3027 return err;
3028 }
3029
3030 /*
3031 * logging directories is very similar to logging inodes; we find all the items
3032 * from the current transaction and write them to the log.
3033 *
3034 * The recovery code scans the directory in the subvolume, and if it finds a
3035 * key in the range logged that is not present in the log tree, then it means
3036 * that dir entry was unlinked during the transaction.
3037 *
3038 * In order for that scan to work, we must include one key smaller than
3039 * the smallest logged by this transaction and one key larger than the largest
3040 * key logged by this transaction.
3041 */
3042 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3043 struct btrfs_root *root, struct inode *inode,
3044 struct btrfs_path *path,
3045 struct btrfs_path *dst_path)
3046 {
3047 u64 min_key;
3048 u64 max_key;
3049 int ret;
3050 int key_type = BTRFS_DIR_ITEM_KEY;
3051
3052 again:
3053 min_key = 0;
3054 max_key = 0;
3055 while (1) {
3056 ret = log_dir_items(trans, root, inode, path,
3057 dst_path, key_type, min_key,
3058 &max_key);
3059 if (ret)
3060 return ret;
3061 if (max_key == (u64)-1)
3062 break;
3063 min_key = max_key + 1;
3064 }
3065
3066 if (key_type == BTRFS_DIR_ITEM_KEY) {
3067 key_type = BTRFS_DIR_INDEX_KEY;
3068 goto again;
3069 }
3070 return 0;
3071 }
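/*
 * Editorial sketch of the loop above: the directory is logged in two
 * full passes, first BTRFS_DIR_ITEM_KEY and then BTRFS_DIR_INDEX_KEY.
 * Each pass repeatedly calls log_dir_items(), advancing min_key to
 * max_key + 1, until a range whose last offset is (u64)-1 signals the
 * end of the key space for that type.
 */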
3072
3073 /*
3074 * a helper function to drop items from the log before we relog an
3075 * inode. max_key_type indicates the highest item type to remove.
3076 * This cannot be run for file data extents because it does not
3077 * free the extents they point to.
3078 */
3079 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3080 struct btrfs_root *log,
3081 struct btrfs_path *path,
3082 u64 objectid, int max_key_type)
3083 {
3084 int ret;
3085 struct btrfs_key key;
3086 struct btrfs_key found_key;
3087 int start_slot;
3088
3089 key.objectid = objectid;
3090 key.type = max_key_type;
3091 key.offset = (u64)-1;
3092
3093 while (1) {
3094 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3095 BUG_ON(ret == 0); /* Logic error */
3096 if (ret < 0)
3097 break;
3098
3099 if (path->slots[0] == 0)
3100 break;
3101
3102 path->slots[0]--;
3103 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3104 path->slots[0]);
3105
3106 if (found_key.objectid != objectid)
3107 break;
3108
3109 found_key.offset = 0;
3110 found_key.type = 0;
3111 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3112 &start_slot);
3113
3114 ret = btrfs_del_items(trans, log, path, start_slot,
3115 path->slots[0] - start_slot + 1);
3116 /*
3117 * If start slot isn't 0 then we don't need to re-search, we've
3118 * found the last guy with the objectid in this tree.
3119 */
3120 if (ret || start_slot != 0)
3121 break;
3122 btrfs_release_path(path);
3123 }
3124 btrfs_release_path(path);
3125 if (ret > 0)
3126 ret = 0;
3127 return ret;
3128 }
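/*
 * Editorial note on the search above: starting at (objectid,
 * max_key_type, (u64)-1) and stepping back one slot always lands on
 * the last item of this objectid with type <= max_key_type, so each
 * btrfs_del_items() call removes a whole tail of the inode's log
 * items.  The loop only re-searches when the deleted run began at
 * slot 0 of the leaf, since earlier leaves may still hold matches.
 */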
3129
3130 static void fill_inode_item(struct btrfs_trans_handle *trans,
3131 struct extent_buffer *leaf,
3132 struct btrfs_inode_item *item,
3133 struct inode *inode, int log_inode_only)
3134 {
3135 struct btrfs_map_token token;
3136
3137 btrfs_init_map_token(&token);
3138
3139 if (log_inode_only) {
3140 /* set the generation to zero so the recovery code
3141 * can tell the difference between a logging
3142 * just to say 'this inode exists' and a logging
3143 * to say 'update this inode with these values'
3144 */
3145 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3146 btrfs_set_token_inode_size(leaf, item, 0, &token);
3147 } else {
3148 btrfs_set_token_inode_generation(leaf, item,
3149 BTRFS_I(inode)->generation,
3150 &token);
3151 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3152 }
3153
3154 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3155 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3156 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3157 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3158
3159 btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
3160 inode->i_atime.tv_sec, &token);
3161 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
3162 inode->i_atime.tv_nsec, &token);
3163
3164 btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
3165 inode->i_mtime.tv_sec, &token);
3166 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
3167 inode->i_mtime.tv_nsec, &token);
3168
3169 btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
3170 inode->i_ctime.tv_sec, &token);
3171 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
3172 inode->i_ctime.tv_nsec, &token);
3173
3174 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3175 &token);
3176
3177 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3178 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3179 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3180 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3181 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3182 }
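/*
 * Editorial example of the log_inode_only convention above: an inode
 * logged with LOG_INODE_EXISTS gets generation == 0 and size == 0 in
 * the log, so recovery treats the item as "make sure this inode
 * exists" rather than "overwrite the inode with these values".
 */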
3183
3184 static int log_inode_item(struct btrfs_trans_handle *trans,
3185 struct btrfs_root *log, struct btrfs_path *path,
3186 struct inode *inode)
3187 {
3188 struct btrfs_inode_item *inode_item;
3189 int ret;
3190
3191 ret = btrfs_insert_empty_item(trans, log, path,
3192 &BTRFS_I(inode)->location,
3193 sizeof(*inode_item));
3194 if (ret && ret != -EEXIST)
3195 return ret;
3196 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3197 struct btrfs_inode_item);
3198 fill_inode_item(trans, path->nodes[0], inode_item, inode, 0);
3199 btrfs_release_path(path);
3200 return 0;
3201 }
3202
3203 static noinline int copy_items(struct btrfs_trans_handle *trans,
3204 struct inode *inode,
3205 struct btrfs_path *dst_path,
3206 struct btrfs_path *src_path, u64 *last_extent,
3207 int start_slot, int nr, int inode_only)
3208 {
3209 unsigned long src_offset;
3210 unsigned long dst_offset;
3211 struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
3212 struct btrfs_file_extent_item *extent;
3213 struct btrfs_inode_item *inode_item;
3214 struct extent_buffer *src = src_path->nodes[0];
3215 struct btrfs_key first_key, last_key, key;
3216 int ret;
3217 struct btrfs_key *ins_keys;
3218 u32 *ins_sizes;
3219 char *ins_data;
3220 int i;
3221 struct list_head ordered_sums;
3222 int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3223 bool has_extents = false;
3224 bool need_find_last_extent = (*last_extent == 0);
3225 bool done = false;
3226
3227 INIT_LIST_HEAD(&ordered_sums);
3228
3229 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3230 nr * sizeof(u32), GFP_NOFS);
3231 if (!ins_data)
3232 return -ENOMEM;
3233
3234 first_key.objectid = (u64)-1;
3235
3236 ins_sizes = (u32 *)ins_data;
3237 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3238
3239 for (i = 0; i < nr; i++) {
3240 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3241 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3242 }
3243 ret = btrfs_insert_empty_items(trans, log, dst_path,
3244 ins_keys, ins_sizes, nr);
3245 if (ret) {
3246 kfree(ins_data);
3247 return ret;
3248 }
3249
3250 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3251 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3252 dst_path->slots[0]);
3253
3254 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3255
3256 if (i == nr - 1)
3257 last_key = ins_keys[i];
3258
3259 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3260 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3261 dst_path->slots[0],
3262 struct btrfs_inode_item);
3263 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3264 inode, inode_only == LOG_INODE_EXISTS);
3265 } else {
3266 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3267 src_offset, ins_sizes[i]);
3268 }
3269
3270 /*
3271 * We set need_find_last_extent here in case we know we were
3272 * processing other items and then walk into the first extent in
3273 * the inode. If we don't hit an extent then nothing changes,
3274 * we'll do the last search the next time around.
3275 */
3276 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
3277 has_extents = true;
3278 if (need_find_last_extent &&
3279 first_key.objectid == (u64)-1)
3280 first_key = ins_keys[i];
3281 } else {
3282 need_find_last_extent = false;
3283 }
3284
3285 /* copy the data checksums for file extents into the log so
3286 * that truncates or deletes of this inode don't force the
3287 * inode to be relogged
3288 */
3289 if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY &&
3290 !skip_csum) {
3291 int found_type;
3292 extent = btrfs_item_ptr(src, start_slot + i,
3293 struct btrfs_file_extent_item);
3294
3295 if (btrfs_file_extent_generation(src, extent) < trans->transid)
3296 continue;
3297
3298 found_type = btrfs_file_extent_type(src, extent);
3299 if (found_type == BTRFS_FILE_EXTENT_REG) {
3300 u64 ds, dl, cs, cl;
3301 ds = btrfs_file_extent_disk_bytenr(src,
3302 extent);
3303 /* ds == 0 is a hole */
3304 if (ds == 0)
3305 continue;
3306
3307 dl = btrfs_file_extent_disk_num_bytes(src,
3308 extent);
3309 cs = btrfs_file_extent_offset(src, extent);
3310 cl = btrfs_file_extent_num_bytes(src,
3311 extent);
3312 if (btrfs_file_extent_compression(src,
3313 extent)) {
3314 cs = 0;
3315 cl = dl;
3316 }
3317
3318 ret = btrfs_lookup_csums_range(
3319 log->fs_info->csum_root,
3320 ds + cs, ds + cs + cl - 1,
3321 &ordered_sums, 0);
3322 if (ret) {
3323 btrfs_release_path(dst_path);
3324 kfree(ins_data);
3325 return ret;
3326 }
3327 }
3328 }
3329 }
3330
3331 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
3332 btrfs_release_path(dst_path);
3333 kfree(ins_data);
3334
3335 /*
3336 * we have to do this after the loop above to avoid changing the
3337 * log tree while trying to change the log tree.
3338 */
3339 ret = 0;
3340 while (!list_empty(&ordered_sums)) {
3341 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3342 struct btrfs_ordered_sum,
3343 list);
3344 if (!ret)
3345 ret = btrfs_csum_file_blocks(trans, log, sums);
3346 list_del(&sums->list);
3347 kfree(sums);
3348 }
3349
3350 if (!has_extents)
3351 return ret;
3352
3353 /*
3354 * Because we use btrfs_search_forward we could skip leaves that were
3355 * not modified and then assume *last_extent is valid when it really
3356 * isn't. So back up to the previous leaf and read the end of the last
3357 * extent before we go and fill in holes.
3358 */
3359 if (need_find_last_extent) {
3360 u64 len;
3361
3362 ret = btrfs_prev_leaf(BTRFS_I(inode)->root, src_path);
3363 if (ret < 0)
3364 return ret;
3365 if (ret)
3366 goto fill_holes;
3367 if (src_path->slots[0])
3368 src_path->slots[0]--;
3369 src = src_path->nodes[0];
3370 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
3371 if (key.objectid != btrfs_ino(inode) ||
3372 key.type != BTRFS_EXTENT_DATA_KEY)
3373 goto fill_holes;
3374 extent = btrfs_item_ptr(src, src_path->slots[0],
3375 struct btrfs_file_extent_item);
3376 if (btrfs_file_extent_type(src, extent) ==
3377 BTRFS_FILE_EXTENT_INLINE) {
3378 len = btrfs_file_extent_inline_len(src,
3379 src_path->slots[0],
3380 extent);
3381 *last_extent = ALIGN(key.offset + len,
3382 log->sectorsize);
3383 } else {
3384 len = btrfs_file_extent_num_bytes(src, extent);
3385 *last_extent = key.offset + len;
3386 }
3387 }
3388 fill_holes:
3389 /* So we did prev_leaf, now we need to move to the next leaf, but a few
3390 * things could have happened
3391 *
3392 * 1) A merge could have happened, so we could currently be on a leaf
3393 * that holds what we were copying in the first place.
3394 * 2) A split could have happened, and now not all of the items we want
3395 * are on the same leaf.
3396 *
3397 * So we need to adjust how we search for holes, we need to drop the
3398 * path and re-search for the first extent key we found, and then walk
3399 * forward until we hit the last one we copied.
3400 */
3401 if (need_find_last_extent) {
3402 /* btrfs_prev_leaf could return 1 without releasing the path */
3403 btrfs_release_path(src_path);
3404 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &first_key,
3405 src_path, 0, 0);
3406 if (ret < 0)
3407 return ret;
3408 ASSERT(ret == 0);
3409 src = src_path->nodes[0];
3410 i = src_path->slots[0];
3411 } else {
3412 i = start_slot;
3413 }
3414
3415 /*
3416 * Ok so here we need to go through and fill in any holes we may have
3417 * to make sure that holes are punched for those areas in case they had
3418 * extents previously.
3419 */
3420 while (!done) {
3421 u64 offset, len;
3422 u64 extent_end;
3423
3424 if (i >= btrfs_header_nritems(src_path->nodes[0])) {
3425 ret = btrfs_next_leaf(BTRFS_I(inode)->root, src_path);
3426 if (ret < 0)
3427 return ret;
3428 ASSERT(ret == 0);
3429 src = src_path->nodes[0];
3430 i = 0;
3431 }
3432
3433 btrfs_item_key_to_cpu(src, &key, i);
3434 if (!btrfs_comp_cpu_keys(&key, &last_key))
3435 done = true;
3436 if (key.objectid != btrfs_ino(inode) ||
3437 key.type != BTRFS_EXTENT_DATA_KEY) {
3438 i++;
3439 continue;
3440 }
3441 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
3442 if (btrfs_file_extent_type(src, extent) ==
3443 BTRFS_FILE_EXTENT_INLINE) {
3444 len = btrfs_file_extent_inline_len(src, i, extent);
3445 extent_end = ALIGN(key.offset + len, log->sectorsize);
3446 } else {
3447 len = btrfs_file_extent_num_bytes(src, extent);
3448 extent_end = key.offset + len;
3449 }
3450 i++;
3451
3452 if (*last_extent == key.offset) {
3453 *last_extent = extent_end;
3454 continue;
3455 }
3456 offset = *last_extent;
3457 len = key.offset - *last_extent;
3458 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
3459 offset, 0, 0, len, 0, len, 0,
3460 0, 0);
3461 if (ret)
3462 break;
3463 *last_extent = offset + len;
3464 }
3465 /*
3466 * Need to let the callers know we dropped the path so they should
3467 * re-search.
3468 */
3469 if (!ret && need_find_last_extent)
3470 ret = 1;
3471 return ret;
3472 }
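/*
 * Worked hole-filling example (editorial): with *last_extent == 4096
 * and the next copied extent key at file offset 16384, the loop above
 * inserts a hole file extent (disk bytenr 0) covering [4096, 16384)
 * via btrfs_insert_file_extent(..., offset = 4096, disk_bytenr = 0,
 * ..., len = 12288), then advances *last_extent to 16384.  Replay
 * thus punches out any stale extents left in the gap.
 */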
3473
3474 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
3475 {
3476 struct extent_map *em1, *em2;
3477
3478 em1 = list_entry(a, struct extent_map, list);
3479 em2 = list_entry(b, struct extent_map, list);
3480
3481 if (em1->start < em2->start)
3482 return -1;
3483 else if (em1->start > em2->start)
3484 return 1;
3485 return 0;
3486 }
3487
3488 static int log_one_extent(struct btrfs_trans_handle *trans,
3489 struct inode *inode, struct btrfs_root *root,
3490 struct extent_map *em, struct btrfs_path *path,
3491 struct list_head *logged_list)
3492 {
3493 struct btrfs_root *log = root->log_root;
3494 struct btrfs_file_extent_item *fi;
3495 struct extent_buffer *leaf;
3496 struct btrfs_ordered_extent *ordered;
3497 struct list_head ordered_sums;
3498 struct btrfs_map_token token;
3499 struct btrfs_key key;
3500 u64 mod_start = em->mod_start;
3501 u64 mod_len = em->mod_len;
3502 u64 csum_offset;
3503 u64 csum_len;
3504 u64 extent_offset = em->start - em->orig_start;
3505 u64 block_len;
3506 int ret;
3507 bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3508 int extent_inserted = 0;
3509
3510 INIT_LIST_HEAD(&ordered_sums);
3511 btrfs_init_map_token(&token);
3512
3513 ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
3514 em->start + em->len, NULL, 0, 1,
3515 sizeof(*fi), &extent_inserted);
3516 if (ret)
3517 return ret;
3518
3519 if (!extent_inserted) {
3520 key.objectid = btrfs_ino(inode);
3521 key.type = BTRFS_EXTENT_DATA_KEY;
3522 key.offset = em->start;
3523
3524 ret = btrfs_insert_empty_item(trans, log, path, &key,
3525 sizeof(*fi));
3526 if (ret)
3527 return ret;
3528 }
3529 leaf = path->nodes[0];
3530 fi = btrfs_item_ptr(leaf, path->slots[0],
3531 struct btrfs_file_extent_item);
3532
3533 btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
3534 &token);
3535 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3536 skip_csum = true;
3537 btrfs_set_token_file_extent_type(leaf, fi,
3538 BTRFS_FILE_EXTENT_PREALLOC,
3539 &token);
3540 } else {
3541 btrfs_set_token_file_extent_type(leaf, fi,
3542 BTRFS_FILE_EXTENT_REG,
3543 &token);
3544 if (em->block_start == EXTENT_MAP_HOLE)
3545 skip_csum = true;
3546 }
3547
3548 block_len = max(em->block_len, em->orig_block_len);
3549 if (em->compress_type != BTRFS_COMPRESS_NONE) {
3550 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
3551 em->block_start,
3552 &token);
3553 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
3554 &token);
3555 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
3556 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
3557 em->block_start -
3558 extent_offset, &token);
3559 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
3560 &token);
3561 } else {
3562 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
3563 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
3564 &token);
3565 }
3566
3567 btrfs_set_token_file_extent_offset(leaf, fi,
3568 em->start - em->orig_start,
3569 &token);
3570 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
3571 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
3572 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
3573 &token);
3574 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
3575 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
3576 btrfs_mark_buffer_dirty(leaf);
3577
3578 btrfs_release_path(path);
3579 if (ret) {
3580 return ret;
3581 }
3582
3583 if (skip_csum)
3584 return 0;
3585
3586 /*
3587 * First check and see if our csums are on our outstanding ordered
3588 * extents.
3589 */
3590 list_for_each_entry(ordered, logged_list, log_list) {
3591 struct btrfs_ordered_sum *sum;
3592
3593 if (!mod_len)
3594 break;
3595
3596 if (ordered->file_offset + ordered->len <= mod_start ||
3597 mod_start + mod_len <= ordered->file_offset)
3598 continue;
3599
3600 /*
3601 * We are going to copy all the csums on this ordered extent, so
3602 * go ahead and adjust mod_start and mod_len in case this
3603 * ordered extent has already been logged.
3604 */
3605 if (ordered->file_offset > mod_start) {
3606 if (ordered->file_offset + ordered->len >=
3607 mod_start + mod_len)
3608 mod_len = ordered->file_offset - mod_start;
3609 /*
3610 * If we have this case
3611 *
3612 * |--------- logged extent ---------|
3613 * |----- ordered extent ----|
3614 *
3615 * Just don't mess with mod_start and mod_len, we'll
3616 * just end up logging more csums than we need and it
3617 * will be ok.
3618 */
3619 } else {
3620 if (ordered->file_offset + ordered->len <
3621 mod_start + mod_len) {
3622 mod_len = (mod_start + mod_len) -
3623 (ordered->file_offset + ordered->len);
3624 mod_start = ordered->file_offset +
3625 ordered->len;
3626 } else {
3627 mod_len = 0;
3628 }
3629 }
3630
3631 /*
3632 * To keep us from looping for the above case of an ordered
3633 * extent that falls inside of the logged extent.
3634 */
3635 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
3636 &ordered->flags))
3637 continue;
3638
3639 if (ordered->csum_bytes_left) {
3640 btrfs_start_ordered_extent(inode, ordered, 0);
3641 wait_event(ordered->wait,
3642 ordered->csum_bytes_left == 0);
3643 }
3644
3645 list_for_each_entry(sum, &ordered->list, list) {
3646 ret = btrfs_csum_file_blocks(trans, log, sum);
3647 if (ret)
3648 goto unlocked;
3649 }
3650
3651 }
3652 unlocked:
3653
3654 if (!mod_len || ret)
3655 return ret;
3656
3657 if (em->compress_type) {
3658 csum_offset = 0;
3659 csum_len = block_len;
3660 } else {
3661 csum_offset = mod_start - em->start;
3662 csum_len = mod_len;
3663 }
3664
3665 /* block start is already adjusted for the file extent offset. */
3666 ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
3667 em->block_start + csum_offset,
3668 em->block_start + csum_offset +
3669 csum_len - 1, &ordered_sums, 0);
3670 if (ret)
3671 return ret;
3672
3673 while (!list_empty(&ordered_sums)) {
3674 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3675 struct btrfs_ordered_sum,
3676 list);
3677 if (!ret)
3678 ret = btrfs_csum_file_blocks(trans, log, sums);
3679 list_del(&sums->list);
3680 kfree(sums);
3681 }
3682
3683 return ret;
3684 }
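/*
 * Editorial csum-range example: for an uncompressed extent with
 * em->start == 0, em->block_start == 1M, mod_start == 8192 and
 * mod_len == 4096, csum_offset becomes 8192 and the lookup covers
 * disk bytes [1M + 8192, 1M + 12288), so only the csums for the
 * modified part of the extent are copied into the log.  Compressed
 * extents always copy csums for the whole on-disk extent
 * (csum_offset = 0, csum_len = block_len).
 */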
3685
3686 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
3687 struct btrfs_root *root,
3688 struct inode *inode,
3689 struct btrfs_path *path,
3690 struct list_head *logged_list)
3691 {
3692 struct extent_map *em, *n;
3693 struct list_head extents;
3694 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
3695 u64 test_gen;
3696 int ret = 0;
3697 int num = 0;
3698
3699 INIT_LIST_HEAD(&extents);
3700
3701 write_lock(&tree->lock);
3702 test_gen = root->fs_info->last_trans_committed;
3703
3704 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
3705 list_del_init(&em->list);
3706
3707 /*
3708 * Just an arbitrary cap: this gets really CPU intensive once
3709 * we start accumulating a lot of extents, and once we have
3710 * that many extents a full transaction commit is simply
3711 * faster.
3712 */
3713 if (++num > 32768) {
3714 list_del_init(&tree->modified_extents);
3715 ret = -EFBIG;
3716 goto process;
3717 }
3718
3719 if (em->generation <= test_gen)
3720 continue;
3721 /* Need a ref to keep it from getting evicted from cache */
3722 atomic_inc(&em->refs);
3723 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
3724 list_add_tail(&em->list, &extents);
3725 num++;
3726 }
3727
3728 list_sort(NULL, &extents, extent_cmp);
3729
3730 process:
3731 while (!list_empty(&extents)) {
3732 em = list_entry(extents.next, struct extent_map, list);
3733
3734 list_del_init(&em->list);
3735
3736 /*
3737 * If we had an error we just need to delete everybody from our
3738 * private list.
3739 */
3740 if (ret) {
3741 clear_em_logging(tree, em);
3742 free_extent_map(em);
3743 continue;
3744 }
3745
3746 write_unlock(&tree->lock);
3747
3748 ret = log_one_extent(trans, inode, root, em, path, logged_list);
3749 write_lock(&tree->lock);
3750 clear_em_logging(tree, em);
3751 free_extent_map(em);
3752 }
3753 WARN_ON(!list_empty(&extents));
3754 write_unlock(&tree->lock);
3755
3756 btrfs_release_path(path);
3757 return ret;
3758 }
3759
3760 /* log a single inode in the tree log.
3761 * At least one parent directory for this inode must exist in the tree
3762 * or be logged already.
3763 *
3764 * Any items from this inode changed by the current transaction are copied
3765 * to the log tree. An extra reference is taken on any extents in this
3766 * file, allowing us to avoid a whole pile of corner cases around logging
3767 * blocks that have been removed from the tree.
3768 *
3769 * See LOG_INODE_ALL and related defines for a description of what inode_only
3770 * does.
3771 *
3772 * This handles both files and directories.
3773 */
3774 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
3775 struct btrfs_root *root, struct inode *inode,
3776 int inode_only)
3777 {
3778 struct btrfs_path *path;
3779 struct btrfs_path *dst_path;
3780 struct btrfs_key min_key;
3781 struct btrfs_key max_key;
3782 struct btrfs_root *log = root->log_root;
3783 struct extent_buffer *src = NULL;
3784 LIST_HEAD(logged_list);
3785 u64 last_extent = 0;
3786 int err = 0;
3787 int ret;
3788 int nritems;
3789 int ins_start_slot = 0;
3790 int ins_nr;
3791 bool fast_search = false;
3792 u64 ino = btrfs_ino(inode);
3793
3794 path = btrfs_alloc_path();
3795 if (!path)
3796 return -ENOMEM;
3797 dst_path = btrfs_alloc_path();
3798 if (!dst_path) {
3799 btrfs_free_path(path);
3800 return -ENOMEM;
3801 }
3802
3803 min_key.objectid = ino;
3804 min_key.type = BTRFS_INODE_ITEM_KEY;
3805 min_key.offset = 0;
3806
3807 max_key.objectid = ino;
3808
3809
3810 /* today the code can only do partial logging of directories */
3811 if (S_ISDIR(inode->i_mode) ||
3812 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3813 &BTRFS_I(inode)->runtime_flags) &&
3814 inode_only == LOG_INODE_EXISTS))
3815 max_key.type = BTRFS_XATTR_ITEM_KEY;
3816 else
3817 max_key.type = (u8)-1;
3818 max_key.offset = (u64)-1;
3819
3820 /* Only run delayed items if we are a dir or a new file */
3821 if (S_ISDIR(inode->i_mode) ||
3822 BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) {
3823 ret = btrfs_commit_inode_delayed_items(trans, inode);
3824 if (ret) {
3825 btrfs_free_path(path);
3826 btrfs_free_path(dst_path);
3827 return ret;
3828 }
3829 }
3830
3831 mutex_lock(&BTRFS_I(inode)->log_mutex);
3832
3833 btrfs_get_logged_extents(inode, &logged_list);
3834
3835 /*
3836 * a brute force approach to making sure we get the most up-to-date
3837 * copies of everything.
3838 */
3839 if (S_ISDIR(inode->i_mode)) {
3840 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
3841
3842 if (inode_only == LOG_INODE_EXISTS)
3843 max_key_type = BTRFS_XATTR_ITEM_KEY;
3844 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
3845 } else {
3846 if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3847 &BTRFS_I(inode)->runtime_flags)) {
3848 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
3849 &BTRFS_I(inode)->runtime_flags);
3850 ret = btrfs_truncate_inode_items(trans, log,
3851 inode, 0, 0);
3852 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
3853 &BTRFS_I(inode)->runtime_flags) ||
3854 inode_only == LOG_INODE_EXISTS) {
3855 if (inode_only == LOG_INODE_ALL)
3856 fast_search = true;
3857 max_key.type = BTRFS_XATTR_ITEM_KEY;
3858 ret = drop_objectid_items(trans, log, path, ino,
3859 max_key.type);
3860 } else {
3861 if (inode_only == LOG_INODE_ALL)
3862 fast_search = true;
3863 ret = log_inode_item(trans, log, dst_path, inode);
3864 if (ret) {
3865 err = ret;
3866 goto out_unlock;
3867 }
3868 goto log_extents;
3869 }
3870
3871 }
3872 if (ret) {
3873 err = ret;
3874 goto out_unlock;
3875 }
3876 path->keep_locks = 1;
3877
3878 while (1) {
3879 ins_nr = 0;
3880 ret = btrfs_search_forward(root, &min_key,
3881 path, trans->transid);
3882 if (ret != 0)
3883 break;
3884 again:
3885 /* note, ins_nr might be > 0 here, cleanup outside the loop */
3886 if (min_key.objectid != ino)
3887 break;
3888 if (min_key.type > max_key.type)
3889 break;
3890
3891 src = path->nodes[0];
3892 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
3893 ins_nr++;
3894 goto next_slot;
3895 } else if (!ins_nr) {
3896 ins_start_slot = path->slots[0];
3897 ins_nr = 1;
3898 goto next_slot;
3899 }
3900
3901 ret = copy_items(trans, inode, dst_path, path, &last_extent,
3902 ins_start_slot, ins_nr, inode_only);
3903 if (ret < 0) {
3904 err = ret;
3905 goto out_unlock;
3906 } else if (ret) {
3907 ins_nr = 0;
3908 btrfs_release_path(path);
3909 continue;
3910 }
3911 ins_nr = 1;
3912 ins_start_slot = path->slots[0];
3913 next_slot:
3914
3915 nritems = btrfs_header_nritems(path->nodes[0]);
3916 path->slots[0]++;
3917 if (path->slots[0] < nritems) {
3918 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
3919 path->slots[0]);
3920 goto again;
3921 }
3922 if (ins_nr) {
3923 ret = copy_items(trans, inode, dst_path, path,
3924 &last_extent, ins_start_slot,
3925 ins_nr, inode_only);
3926 if (ret < 0) {
3927 err = ret;
3928 goto out_unlock;
3929 }
3930 ret = 0;
3931 ins_nr = 0;
3932 }
3933 btrfs_release_path(path);
3934
3935 if (min_key.offset < (u64)-1) {
3936 min_key.offset++;
3937 } else if (min_key.type < max_key.type) {
3938 min_key.type++;
3939 min_key.offset = 0;
3940 } else {
3941 break;
3942 }
3943 }
3944 if (ins_nr) {
3945 ret = copy_items(trans, inode, dst_path, path, &last_extent,
3946 ins_start_slot, ins_nr, inode_only);
3947 if (ret < 0) {
3948 err = ret;
3949 goto out_unlock;
3950 }
3951 ret = 0;
3952 ins_nr = 0;
3953 }
3954
3955 log_extents:
3956 btrfs_release_path(path);
3957 btrfs_release_path(dst_path);
3958 if (fast_search) {
3959 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
3960 &logged_list);
3961 if (ret) {
3962 err = ret;
3963 goto out_unlock;
3964 }
3965 } else if (inode_only == LOG_INODE_ALL) {
3966 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
3967 struct extent_map *em, *n;
3968
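		/*
		 * everything for this inode was copied the slow way above,
		 * so the modified extent list is stale; drain it so a later
		 * fast fsync does not log these extents a second time
		 */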
3969 write_lock(&tree->lock);
3970 list_for_each_entry_safe(em, n, &tree->modified_extents, list)
3971 list_del_init(&em->list);
3972 write_unlock(&tree->lock);
3973 }
3974
3975 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
3976 ret = log_directory_changes(trans, root, inode, path, dst_path);
3977 if (ret) {
3978 err = ret;
3979 goto out_unlock;
3980 }
3981 }
3982 BTRFS_I(inode)->logged_trans = trans->transid;
3983 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
3984 out_unlock:
3985 if (unlikely(err))
3986 btrfs_put_logged_extents(&logged_list);
3987 else
3988 btrfs_submit_logged_extents(&logged_list, log);
3989 mutex_unlock(&BTRFS_I(inode)->log_mutex);
3990
3991 btrfs_free_path(path);
3992 btrfs_free_path(dst_path);
3993 return err;
3994 }
3995
3996 /*
3997 * follow the dentry parent pointers up the chain and see if any
3998  * of the directories in the chain require a full commit before they can
3999 * be logged. Returns zero if nothing special needs to be done or 1 if
4000 * a full commit is required.
4001 */
4002 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
4003 struct inode *inode,
4004 struct dentry *parent,
4005 struct super_block *sb,
4006 u64 last_committed)
4007 {
4008 int ret = 0;
4009 struct btrfs_root *root;
4010 struct dentry *old_parent = NULL;
4011 struct inode *orig_inode = inode;
4012
4013 /*
4014  * for regular files, if the inode is already on disk, we don't
4015 * have to worry about the parents at all. This is because
4016 * we can use the last_unlink_trans field to record renames
4017 * and other fun in this file.
4018 */
4019 if (S_ISREG(inode->i_mode) &&
4020 BTRFS_I(inode)->generation <= last_committed &&
4021 BTRFS_I(inode)->last_unlink_trans <= last_committed)
4022 goto out;
4023
4024 if (!S_ISDIR(inode->i_mode)) {
4025 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
4026 goto out;
4027 inode = parent->d_inode;
4028 }
4029
4030 while (1) {
4031 /*
4032 * If we are logging a directory then we start with our inode,
4033 	 * not our parent's inode, so we need to skip setting the
4034 * logged_trans so that further down in the log code we don't
4035 * think this inode has already been logged.
4036 */
4037 if (inode != orig_inode)
4038 BTRFS_I(inode)->logged_trans = trans->transid;
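		/*
		 * make sure the logged_trans update above is visible before
		 * the last_unlink_trans test below
		 */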
4039 smp_mb();
4040
4041 if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
4042 root = BTRFS_I(inode)->root;
4043
4044 /*
4045 * make sure any commits to the log are forced
4046 * to be full commits
4047 */
4048 root->fs_info->last_trans_log_full_commit =
4049 trans->transid;
4050 ret = 1;
4051 break;
4052 }
4053
4054 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
4055 break;
4056
4057 if (IS_ROOT(parent))
4058 break;
4059
4060 parent = dget_parent(parent);
4061 dput(old_parent);
4062 old_parent = parent;
4063 inode = parent->d_inode;
4064
4065 }
4066 dput(old_parent);
4067 out:
4068 return ret;
4069 }
4070
4071 /*
4072 * helper function around btrfs_log_inode to make sure newly created
4073  * parent directories also end up in the log. Only a minimal inode
4074  * item and backref logging is done for parent directories that are
4075  * older than the last committed transaction.
4076 */
4077 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
4078 struct btrfs_root *root, struct inode *inode,
4079 struct dentry *parent, int exists_only)
4080 {
4081 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
4082 struct super_block *sb;
4083 struct dentry *old_parent = NULL;
4084 int ret = 0;
4085 u64 last_committed = root->fs_info->last_trans_committed;
4086
4087 sb = inode->i_sb;
4088
4089 if (btrfs_test_opt(root, NOTREELOG)) {
4090 ret = 1;
4091 goto end_no_trans;
4092 }
4093
4094 if (root->fs_info->last_trans_log_full_commit >
4095 root->fs_info->last_trans_committed) {
4096 ret = 1;
4097 goto end_no_trans;
4098 }
4099
4100 if (root != BTRFS_I(inode)->root ||
4101 btrfs_root_refs(&root->root_item) == 0) {
4102 ret = 1;
4103 goto end_no_trans;
4104 }
4105
4106 ret = check_parent_dirs_for_sync(trans, inode, parent,
4107 sb, last_committed);
4108 if (ret)
4109 goto end_no_trans;
4110
4111 if (btrfs_inode_in_log(inode, trans->transid)) {
4112 ret = BTRFS_NO_LOG_SYNC;
4113 goto end_no_trans;
4114 }
4115
4116 ret = start_log_trans(trans, root);
4117 if (ret)
4118 goto end_no_trans;
4119
4120 ret = btrfs_log_inode(trans, root, inode, inode_only);
4121 if (ret)
4122 goto end_trans;
4123
4124 /*
4125 * for regular files, if its inode is already on disk, we don't
4126 * have to worry about the parents at all. This is because
4127 * we can use the last_unlink_trans field to record renames
4128 * and other fun in this file.
4129 */
4130 if (S_ISREG(inode->i_mode) &&
4131 BTRFS_I(inode)->generation <= last_committed &&
4132 BTRFS_I(inode)->last_unlink_trans <= last_committed) {
4133 ret = 0;
4134 goto end_trans;
4135 }
4136
4137 inode_only = LOG_INODE_EXISTS;
4138 while (1) {
4139 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
4140 break;
4141
4142 inode = parent->d_inode;
4143 if (root != BTRFS_I(inode)->root)
4144 break;
4145
4146 if (BTRFS_I(inode)->generation >
4147 root->fs_info->last_trans_committed) {
4148 ret = btrfs_log_inode(trans, root, inode, inode_only);
4149 if (ret)
4150 goto end_trans;
4151 }
4152 if (IS_ROOT(parent))
4153 break;
4154
4155 parent = dget_parent(parent);
4156 dput(old_parent);
4157 old_parent = parent;
4158 }
4159 ret = 0;
4160 end_trans:
4161 dput(old_parent);
4162 if (ret < 0) {
4163 root->fs_info->last_trans_log_full_commit = trans->transid;
4164 ret = 1;
4165 }
4166 btrfs_end_log_trans(root);
4167 end_no_trans:
4168 return ret;
4169 }
4170
4171 /*
4172  * it is not safe to log a dentry if the chunk root has added new
4173 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
4174 * If this returns 1, you must commit the transaction to safely get your
4175 * data on disk.
4176 */
4177 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
4178 struct btrfs_root *root, struct dentry *dentry)
4179 {
4180 struct dentry *parent = dget_parent(dentry);
4181 int ret;
4182
4183 ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent, 0);
4184 dput(parent);
4185
4186 return ret;
4187 }
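/*
 * A simplified sketch (not verbatim) of how the fsync path in
 * fs/btrfs/file.c consumes this return value; writeback, locking and
 * most error handling are elided:
 *
 *	trans = btrfs_start_transaction(root, 0);
 *	ret = btrfs_log_dentry_safe(trans, root, dentry);
 *	if (ret != BTRFS_NO_LOG_SYNC) {
 *		if (!ret)
 *			ret = btrfs_sync_log(trans, root);
 *		if (!ret)			// logged; no commit needed
 *			ret = btrfs_end_transaction(trans, root);
 *		else				// unsafe to log; fall back
 *			ret = btrfs_commit_transaction(trans, root);
 *	} else {
 *		ret = btrfs_end_transaction(trans, root);
 *	}
 */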
4188
4189 /*
4190  * should be called during mount to replay any log trees
4191 * from the FS
4192 */
4193 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
4194 {
4195 int ret;
4196 struct btrfs_path *path;
4197 struct btrfs_trans_handle *trans;
4198 struct btrfs_key key;
4199 struct btrfs_key found_key;
4200 struct btrfs_key tmp_key;
4201 struct btrfs_root *log;
4202 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
4203 struct walk_control wc = {
4204 .process_func = process_one_buffer,
4205 .stage = 0,
4206 };
4207
4208 path = btrfs_alloc_path();
4209 if (!path)
4210 return -ENOMEM;
4211
4212 fs_info->log_root_recovering = 1;
4213
4214 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4215 if (IS_ERR(trans)) {
4216 ret = PTR_ERR(trans);
4217 goto error;
4218 }
4219
4220 wc.trans = trans;
4221 wc.pin = 1;
4222
4223 ret = walk_log_tree(trans, log_root_tree, &wc);
4224 if (ret) {
4225 btrfs_error(fs_info, ret, "Failed to pin buffers while "
4226 "recovering log root tree.");
4227 goto error;
4228 }
4229
4230 again:
4231 key.objectid = BTRFS_TREE_LOG_OBJECTID;
4232 key.offset = (u64)-1;
4233 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
4234
4235 while (1) {
4236 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
4237
4238 if (ret < 0) {
4239 btrfs_error(fs_info, ret,
4240 "Couldn't find tree log root.");
4241 goto error;
4242 }
4243 if (ret > 0) {
4244 if (path->slots[0] == 0)
4245 break;
4246 path->slots[0]--;
4247 }
4248 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
4249 path->slots[0]);
4250 btrfs_release_path(path);
4251 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
4252 break;
4253
4254 log = btrfs_read_fs_root(log_root_tree, &found_key);
4255 if (IS_ERR(log)) {
4256 ret = PTR_ERR(log);
4257 btrfs_error(fs_info, ret,
4258 "Couldn't read tree log root.");
4259 goto error;
4260 }
4261
4262 tmp_key.objectid = found_key.offset;
4263 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
4264 tmp_key.offset = (u64)-1;
4265
4266 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
4267 if (IS_ERR(wc.replay_dest)) {
4268 ret = PTR_ERR(wc.replay_dest);
4269 free_extent_buffer(log->node);
4270 free_extent_buffer(log->commit_root);
4271 kfree(log);
4272 btrfs_error(fs_info, ret, "Couldn't read target root "
4273 "for tree log recovery.");
4274 goto error;
4275 }
4276
4277 wc.replay_dest->log_root = log;
4278 btrfs_record_root_in_trans(trans, wc.replay_dest);
4279 ret = walk_log_tree(trans, log, &wc);
4280
4281 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
4282 ret = fixup_inode_link_counts(trans, wc.replay_dest,
4283 path);
4284 }
4285
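		/*
		 * resume the descending search just below this log root so
		 * every per-subvolume log root gets replayed
		 */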
4286 key.offset = found_key.offset - 1;
4287 wc.replay_dest->log_root = NULL;
4288 free_extent_buffer(log->node);
4289 free_extent_buffer(log->commit_root);
4290 kfree(log);
4291
4292 if (ret)
4293 goto error;
4294
4295 if (found_key.offset == 0)
4296 break;
4297 }
4298 btrfs_release_path(path);
4299
4300 /* step one is to pin it all, step two is to replay just inodes */
4301 if (wc.pin) {
4302 wc.pin = 0;
4303 wc.process_func = replay_one_buffer;
4304 wc.stage = LOG_WALK_REPLAY_INODES;
4305 goto again;
4306 }
4307 /* step three is to replay everything */
4308 if (wc.stage < LOG_WALK_REPLAY_ALL) {
4309 wc.stage++;
4310 goto again;
4311 }
4312
4313 btrfs_free_path(path);
4314
4315 	/* step four is to commit the transaction, which also unpins the blocks */
4316 ret = btrfs_commit_transaction(trans, fs_info->tree_root);
4317 if (ret)
4318 return ret;
4319
4320 free_extent_buffer(log_root_tree->node);
4321 log_root_tree->log_root = NULL;
4322 fs_info->log_root_recovering = 0;
4323 kfree(log_root_tree);
4324
4325 return 0;
4326 error:
4327 if (wc.trans)
4328 btrfs_end_transaction(wc.trans, fs_info->tree_root);
4329 btrfs_free_path(path);
4330 return ret;
4331 }
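/*
 * A simplified sketch of how open_ctree() in fs/btrfs/disk-io.c is
 * expected to drive this during mount (reading the log root tree from
 * the super block is elided):
 *
 *	if (btrfs_super_log_root(disk_super) != 0) {
 *		...set up log_tree_root from the super block...
 *		ret = btrfs_recover_log_trees(log_tree_root);
 *	}
 */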
4332
4333 /*
4334 * there are some corner cases where we want to force a full
4335 * commit instead of allowing a directory to be logged.
4336 *
4337  * They revolve around files that were unlinked from the directory, and
4338 * this function updates the parent directory so that a full commit is
4339 * properly done if it is fsync'd later after the unlinks are done.
4340 */
4341 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
4342 struct inode *dir, struct inode *inode,
4343 int for_rename)
4344 {
4345 /*
4346 * when we're logging a file, if it hasn't been renamed
4347 * or unlinked, and its inode is fully committed on disk,
4348 * we don't have to worry about walking up the directory chain
4349 * to log its parents.
4350 *
4351 * So, we use the last_unlink_trans field to put this transid
4352 * into the file. When the file is logged we check it and
4353 * don't log the parents if the file is fully on disk.
4354 */
4355 if (S_ISREG(inode->i_mode))
4356 BTRFS_I(inode)->last_unlink_trans = trans->transid;
4357
4358 /*
4359 	 * if this directory was already logged, any new
4360 * names for this file/dir will get recorded
4361 */
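	/*
	 * the barrier is intended to order the last_unlink_trans store
	 * above (for regular files) against the logged_trans reads below
	 */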
4362 smp_mb();
4363 if (BTRFS_I(dir)->logged_trans == trans->transid)
4364 return;
4365
4366 /*
4367 * if the inode we're about to unlink was logged,
4368 * the log will be properly updated for any new names
4369 */
4370 if (BTRFS_I(inode)->logged_trans == trans->transid)
4371 return;
4372
4373 /*
4374 * when renaming files across directories, if the directory
4375 	 * we're unlinking from gets fsync'd later on, there's
4376 * no way to find the destination directory later and fsync it
4377 * properly. So, we have to be conservative and force commits
4378 * so the new name gets discovered.
4379 */
4380 if (for_rename)
4381 goto record;
4382
4383 /* we can safely do the unlink without any special recording */
4384 return;
4385
4386 record:
4387 BTRFS_I(dir)->last_unlink_trans = trans->transid;
4388 }
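/*
 * A simplified sketch of the expected call sites in fs/btrfs/inode.c
 * (transaction setup elided); the unlink path passes for_rename == 0,
 * the rename path passes 1 for the directory the name leaves:
 *
 *	// btrfs_unlink():
 *	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
 *
 *	// btrfs_rename(), before the old name is removed:
 *	btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
 */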
4389
4390 /*
4391 * Call this after adding a new name for a file and it will properly
4392 * update the log to reflect the new name.
4393 *
4394 * It will return zero if all goes well, and it will return 1 if a
4395 * full transaction commit is required.
4396 */
4397 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
4398 struct inode *inode, struct inode *old_dir,
4399 struct dentry *parent)
4400 {
4401 	struct btrfs_root *root = BTRFS_I(inode)->root;
4402
4403 /*
4404 * this will force the logging code to walk the dentry chain
4405 * up for the file
4406 */
4407 if (S_ISREG(inode->i_mode))
4408 BTRFS_I(inode)->last_unlink_trans = trans->transid;
4409
4410 /*
4411 	 * if this inode hasn't been logged and the directory we're renaming it
4412 * from hasn't been logged, we don't need to log it
4413 */
4414 if (BTRFS_I(inode)->logged_trans <=
4415 root->fs_info->last_trans_committed &&
4416 (!old_dir || BTRFS_I(old_dir)->logged_trans <=
4417 root->fs_info->last_trans_committed))
4418 return 0;
4419
4420 return btrfs_log_inode_parent(trans, root, inode, parent, 1);
4421 }
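/*
 * A simplified sketch of the expected call site in btrfs_rename()
 * (fs/btrfs/inode.c), run after the new name is in place; renames of
 * subvolume roots skip this:
 *
 *	struct dentry *parent = dget_parent(new_dentry);
 *	btrfs_log_new_name(trans, old_inode, old_dir, parent);
 *	dput(parent);
 */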
4422