Btrfs: check if previous transaction aborted to avoid fs corruption
[deliverable/linux.git] / fs / btrfs / tree-log.c
CommitLineData
e02119d5
CM
1/*
2 * Copyright (C) 2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/sched.h>
5a0e3ad6 20#include <linux/slab.h>
c6adc9cc 21#include <linux/blkdev.h>
5dc562c5 22#include <linux/list_sort.h>
995946dd 23#include "tree-log.h"
e02119d5
CM
24#include "disk-io.h"
25#include "locking.h"
26#include "print-tree.h"
f186373f 27#include "backref.h"
f186373f 28#include "hash.h"
e02119d5
CM
29
30/* magic values for the inode_only field in btrfs_log_inode:
31 *
32 * LOG_INODE_ALL means to log everything
33 * LOG_INODE_EXISTS means to log just enough to recreate the inode
34 * during log replay
35 */
36#define LOG_INODE_ALL 0
37#define LOG_INODE_EXISTS 1
38
12fcfd22
CM
39/*
40 * directory trouble cases
41 *
42 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
43 * log, we must force a full commit before doing an fsync of the directory
44 * where the unlink was done.
45 * ---> record transid of last unlink/rename per directory
46 *
47 * mkdir foo/some_dir
48 * normal commit
49 * rename foo/some_dir foo2/some_dir
50 * mkdir foo/some_dir
51 * fsync foo/some_dir/some_file
52 *
53 * The fsync above will unlink the original some_dir without recording
54 * it in its new location (foo2). After a crash, some_dir will be gone
55 * unless the fsync of some_file forces a full commit
56 *
57 * 2) we must log any new names for any file or dir that is in the fsync
58 * log. ---> check inode while renaming/linking.
59 *
60 * 2a) we must log any new names for any file or dir during rename
61 * when the directory they are being removed from was logged.
62 * ---> check inode and old parent dir during rename
63 *
64 * 2a is actually the more important variant. With the extra logging
65 * a crash might unlink the old name without recreating the new one
66 *
67 * 3) after a crash, we must go through any directories with a link count
68 * of zero and redo the rm -rf
69 *
70 * mkdir f1/foo
71 * normal commit
72 * rm -rf f1/foo
73 * fsync(f1)
74 *
75 * The directory f1 was fully removed from the FS, but fsync was never
76 * called on f1, only its parent dir. After a crash the rm -rf must
77 * be replayed. This must be able to recurse down the entire
78 * directory tree. The inode link count fixup code takes care of the
79 * ugly details.
80 */
81
e02119d5
CM
82/*
83 * stages for the tree walking. The first
84 * stage (0) is to only pin down the blocks we find
85 * the second stage (1) is to make sure that all the inodes
86 * we find in the log are created in the subvolume.
87 *
88 * The last stage is to deal with directories and links and extents
89 * and all the other fun semantics
90 */
91#define LOG_WALK_PIN_ONLY 0
92#define LOG_WALK_REPLAY_INODES 1
dd8e7217
JB
93#define LOG_WALK_REPLAY_DIR_INDEX 2
94#define LOG_WALK_REPLAY_ALL 3
e02119d5 95
12fcfd22 96static int btrfs_log_inode(struct btrfs_trans_handle *trans,
49dae1bc
FM
97 struct btrfs_root *root, struct inode *inode,
98 int inode_only,
99 const loff_t start,
8407f553
FM
100 const loff_t end,
101 struct btrfs_log_ctx *ctx);
ec051c0f
YZ
102static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
103 struct btrfs_root *root,
104 struct btrfs_path *path, u64 objectid);
12fcfd22
CM
105static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
106 struct btrfs_root *root,
107 struct btrfs_root *log,
108 struct btrfs_path *path,
109 u64 dirid, int del_all);
e02119d5
CM
110
111/*
112 * tree logging is a special write ahead log used to make sure that
113 * fsyncs and O_SYNCs can happen without doing full tree commits.
114 *
115 * Full tree commits are expensive because they require commonly
116 * modified blocks to be recowed, creating many dirty pages in the
 117 * extent tree and a 4x-6x higher write load than ext3.
118 *
119 * Instead of doing a tree commit on every fsync, we use the
120 * key ranges and transaction ids to find items for a given file or directory
121 * that have changed in this transaction. Those items are copied into
122 * a special tree (one per subvolume root), that tree is written to disk
123 * and then the fsync is considered complete.
124 *
125 * After a crash, items are copied out of the log-tree back into the
126 * subvolume tree. Any file data extents found are recorded in the extent
127 * allocation tree, and the log-tree freed.
128 *
129 * The log tree is read three times, once to pin down all the extents it is
 130 * using in ram, once to create all the inodes logged in the tree
131 * and once to do all the other items.
132 */
133
e02119d5
CM
134/*
135 * start a sub transaction and setup the log tree
136 * this increments the log tree writer count to make the people
137 * syncing the tree wait for us to finish
138 */
139static int start_log_trans(struct btrfs_trans_handle *trans,
8b050d35
MX
140 struct btrfs_root *root,
141 struct btrfs_log_ctx *ctx)
e02119d5 142{
34eb2a52 143 int ret = 0;
7237f183
YZ
144
145 mutex_lock(&root->log_mutex);
34eb2a52 146
7237f183 147 if (root->log_root) {
995946dd 148 if (btrfs_need_log_full_commit(root->fs_info, trans)) {
50471a38
MX
149 ret = -EAGAIN;
150 goto out;
151 }
34eb2a52 152
ff782e0a 153 if (!root->log_start_pid) {
27cdeb70 154 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
34eb2a52 155 root->log_start_pid = current->pid;
ff782e0a 156 } else if (root->log_start_pid != current->pid) {
27cdeb70 157 set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
ff782e0a 158 }
34eb2a52
Z
159 } else {
160 mutex_lock(&root->fs_info->tree_log_mutex);
161 if (!root->fs_info->log_root_tree)
162 ret = btrfs_init_log_root_tree(trans, root->fs_info);
163 mutex_unlock(&root->fs_info->tree_log_mutex);
164 if (ret)
165 goto out;
ff782e0a 166
e02119d5 167 ret = btrfs_add_log_tree(trans, root);
4a500fd1 168 if (ret)
e87ac136 169 goto out;
34eb2a52
Z
170
171 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
172 root->log_start_pid = current->pid;
e02119d5 173 }
34eb2a52 174
2ecb7923 175 atomic_inc(&root->log_batch);
7237f183 176 atomic_inc(&root->log_writers);
8b050d35 177 if (ctx) {
34eb2a52 178 int index = root->log_transid % 2;
8b050d35 179 list_add_tail(&ctx->list, &root->log_ctxs[index]);
d1433deb 180 ctx->log_transid = root->log_transid;
8b050d35 181 }
34eb2a52 182
e87ac136 183out:
7237f183 184 mutex_unlock(&root->log_mutex);
e87ac136 185 return ret;
e02119d5
CM
186}
187
188/*
189 * returns 0 if there was a log transaction running and we were able
190 * to join, or returns -ENOENT if there were not transactions
191 * in progress
192 */
193static int join_running_log_trans(struct btrfs_root *root)
194{
195 int ret = -ENOENT;
196
197 smp_mb();
198 if (!root->log_root)
199 return -ENOENT;
200
7237f183 201 mutex_lock(&root->log_mutex);
e02119d5
CM
202 if (root->log_root) {
203 ret = 0;
7237f183 204 atomic_inc(&root->log_writers);
e02119d5 205 }
7237f183 206 mutex_unlock(&root->log_mutex);
e02119d5
CM
207 return ret;
208}
209
12fcfd22
CM
210/*
211 * This either makes the current running log transaction wait
212 * until you call btrfs_end_log_trans() or it makes any future
213 * log transactions wait until you call btrfs_end_log_trans()
214 */
215int btrfs_pin_log_trans(struct btrfs_root *root)
216{
217 int ret = -ENOENT;
218
219 mutex_lock(&root->log_mutex);
220 atomic_inc(&root->log_writers);
221 mutex_unlock(&root->log_mutex);
222 return ret;
223}
224
e02119d5
CM
225/*
226 * indicate we're done making changes to the log tree
227 * and wake up anyone waiting to do a sync
228 */
143bede5 229void btrfs_end_log_trans(struct btrfs_root *root)
e02119d5 230{
7237f183
YZ
231 if (atomic_dec_and_test(&root->log_writers)) {
232 smp_mb();
233 if (waitqueue_active(&root->log_writer_wait))
234 wake_up(&root->log_writer_wait);
235 }
e02119d5
CM
236}
237
238
239/*
240 * the walk control struct is used to pass state down the chain when
241 * processing the log tree. The stage field tells us which part
242 * of the log tree processing we are currently doing. The others
243 * are state fields used for that specific part
244 */
245struct walk_control {
246 /* should we free the extent on disk when done? This is used
247 * at transaction commit time while freeing a log tree
248 */
249 int free;
250
251 /* should we write out the extent buffer? This is used
252 * while flushing the log tree to disk during a sync
253 */
254 int write;
255
256 /* should we wait for the extent buffer io to finish? Also used
257 * while flushing the log tree to disk for a sync
258 */
259 int wait;
260
261 /* pin only walk, we record which extents on disk belong to the
262 * log trees
263 */
264 int pin;
265
266 /* what stage of the replay code we're currently in */
267 int stage;
268
269 /* the root we are currently replaying */
270 struct btrfs_root *replay_dest;
271
272 /* the trans handle for the current replay */
273 struct btrfs_trans_handle *trans;
274
275 /* the function that gets used to process blocks we find in the
276 * tree. Note the extent_buffer might not be up to date when it is
277 * passed in, and it must be checked or read if you need the data
278 * inside it
279 */
280 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
281 struct walk_control *wc, u64 gen);
282};
283
284/*
285 * process_func used to pin down extents, write them or wait on them
286 */
287static int process_one_buffer(struct btrfs_root *log,
288 struct extent_buffer *eb,
289 struct walk_control *wc, u64 gen)
290{
b50c6e25
JB
291 int ret = 0;
292
8c2a1a30
JB
293 /*
294 * If this fs is mixed then we need to be able to process the leaves to
295 * pin down any logged extents, so we have to read the block.
296 */
297 if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
298 ret = btrfs_read_buffer(eb, gen);
299 if (ret)
300 return ret;
301 }
302
04018de5 303 if (wc->pin)
b50c6e25
JB
304 ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
305 eb->start, eb->len);
e02119d5 306
b50c6e25 307 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
8c2a1a30
JB
308 if (wc->pin && btrfs_header_level(eb) == 0)
309 ret = btrfs_exclude_logged_extents(log, eb);
e02119d5
CM
310 if (wc->write)
311 btrfs_write_tree_block(eb);
312 if (wc->wait)
313 btrfs_wait_tree_block_writeback(eb);
314 }
b50c6e25 315 return ret;
e02119d5
CM
316}
317
318/*
319 * Item overwrite used by replay and tree logging. eb, slot and key all refer
320 * to the src data we are copying out.
321 *
322 * root is the tree we are copying into, and path is a scratch
323 * path for use in this function (it should be released on entry and
324 * will be released on exit).
325 *
326 * If the key is already in the destination tree the existing item is
327 * overwritten. If the existing item isn't big enough, it is extended.
328 * If it is too large, it is truncated.
329 *
330 * If the key isn't in the destination yet, a new item is inserted.
331 */
332static noinline int overwrite_item(struct btrfs_trans_handle *trans,
333 struct btrfs_root *root,
334 struct btrfs_path *path,
335 struct extent_buffer *eb, int slot,
336 struct btrfs_key *key)
337{
338 int ret;
339 u32 item_size;
340 u64 saved_i_size = 0;
341 int save_old_i_size = 0;
342 unsigned long src_ptr;
343 unsigned long dst_ptr;
344 int overwrite_root = 0;
4bc4bee4 345 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
e02119d5
CM
346
347 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
348 overwrite_root = 1;
349
350 item_size = btrfs_item_size_nr(eb, slot);
351 src_ptr = btrfs_item_ptr_offset(eb, slot);
352
353 /* look for the key in the destination tree */
354 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
4bc4bee4
JB
355 if (ret < 0)
356 return ret;
357
e02119d5
CM
358 if (ret == 0) {
359 char *src_copy;
360 char *dst_copy;
361 u32 dst_size = btrfs_item_size_nr(path->nodes[0],
362 path->slots[0]);
363 if (dst_size != item_size)
364 goto insert;
365
366 if (item_size == 0) {
b3b4aa74 367 btrfs_release_path(path);
e02119d5
CM
368 return 0;
369 }
370 dst_copy = kmalloc(item_size, GFP_NOFS);
371 src_copy = kmalloc(item_size, GFP_NOFS);
2a29edc6 372 if (!dst_copy || !src_copy) {
b3b4aa74 373 btrfs_release_path(path);
2a29edc6 374 kfree(dst_copy);
375 kfree(src_copy);
376 return -ENOMEM;
377 }
e02119d5
CM
378
379 read_extent_buffer(eb, src_copy, src_ptr, item_size);
380
381 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
382 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
383 item_size);
384 ret = memcmp(dst_copy, src_copy, item_size);
385
386 kfree(dst_copy);
387 kfree(src_copy);
388 /*
389 * they have the same contents, just return, this saves
390 * us from cowing blocks in the destination tree and doing
391 * extra writes that may not have been done by a previous
392 * sync
393 */
394 if (ret == 0) {
b3b4aa74 395 btrfs_release_path(path);
e02119d5
CM
396 return 0;
397 }
398
4bc4bee4
JB
399 /*
400 * We need to load the old nbytes into the inode so when we
401 * replay the extents we've logged we get the right nbytes.
402 */
403 if (inode_item) {
404 struct btrfs_inode_item *item;
405 u64 nbytes;
d555438b 406 u32 mode;
4bc4bee4
JB
407
408 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
409 struct btrfs_inode_item);
410 nbytes = btrfs_inode_nbytes(path->nodes[0], item);
411 item = btrfs_item_ptr(eb, slot,
412 struct btrfs_inode_item);
413 btrfs_set_inode_nbytes(eb, item, nbytes);
d555438b
JB
414
415 /*
416 * If this is a directory we need to reset the i_size to
417 * 0 so that we can set it up properly when replaying
418 * the rest of the items in this log.
419 */
420 mode = btrfs_inode_mode(eb, item);
421 if (S_ISDIR(mode))
422 btrfs_set_inode_size(eb, item, 0);
4bc4bee4
JB
423 }
424 } else if (inode_item) {
425 struct btrfs_inode_item *item;
d555438b 426 u32 mode;
4bc4bee4
JB
427
428 /*
429 * New inode, set nbytes to 0 so that the nbytes comes out
430 * properly when we replay the extents.
431 */
432 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
433 btrfs_set_inode_nbytes(eb, item, 0);
d555438b
JB
434
435 /*
436 * If this is a directory we need to reset the i_size to 0 so
437 * that we can set it up properly when replaying the rest of
438 * the items in this log.
439 */
440 mode = btrfs_inode_mode(eb, item);
441 if (S_ISDIR(mode))
442 btrfs_set_inode_size(eb, item, 0);
e02119d5
CM
443 }
444insert:
b3b4aa74 445 btrfs_release_path(path);
e02119d5 446 /* try to insert the key into the destination tree */
df8d116f 447 path->skip_release_on_error = 1;
e02119d5
CM
448 ret = btrfs_insert_empty_item(trans, root, path,
449 key, item_size);
df8d116f 450 path->skip_release_on_error = 0;
e02119d5
CM
451
452 /* make sure any existing item is the correct size */
df8d116f 453 if (ret == -EEXIST || ret == -EOVERFLOW) {
e02119d5
CM
454 u32 found_size;
455 found_size = btrfs_item_size_nr(path->nodes[0],
456 path->slots[0]);
143bede5 457 if (found_size > item_size)
afe5fea7 458 btrfs_truncate_item(root, path, item_size, 1);
143bede5 459 else if (found_size < item_size)
4b90c680 460 btrfs_extend_item(root, path,
143bede5 461 item_size - found_size);
e02119d5 462 } else if (ret) {
4a500fd1 463 return ret;
e02119d5
CM
464 }
465 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
466 path->slots[0]);
467
468 /* don't overwrite an existing inode if the generation number
469 * was logged as zero. This is done when the tree logging code
470 * is just logging an inode to make sure it exists after recovery.
471 *
472 * Also, don't overwrite i_size on directories during replay.
473 * log replay inserts and removes directory items based on the
474 * state of the tree found in the subvolume, and i_size is modified
475 * as it goes
476 */
477 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
478 struct btrfs_inode_item *src_item;
479 struct btrfs_inode_item *dst_item;
480
481 src_item = (struct btrfs_inode_item *)src_ptr;
482 dst_item = (struct btrfs_inode_item *)dst_ptr;
483
1a4bcf47
FM
484 if (btrfs_inode_generation(eb, src_item) == 0) {
485 struct extent_buffer *dst_eb = path->nodes[0];
2f2ff0ee 486 const u64 ino_size = btrfs_inode_size(eb, src_item);
1a4bcf47 487
2f2ff0ee
FM
488 /*
489 * For regular files an ino_size == 0 is used only when
490 * logging that an inode exists, as part of a directory
491 * fsync, and the inode wasn't fsynced before. In this
492 * case don't set the size of the inode in the fs/subvol
493 * tree, otherwise we would be throwing valid data away.
494 */
1a4bcf47 495 if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
2f2ff0ee
FM
496 S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
497 ino_size != 0) {
1a4bcf47 498 struct btrfs_map_token token;
1a4bcf47
FM
499
500 btrfs_init_map_token(&token);
501 btrfs_set_token_inode_size(dst_eb, dst_item,
502 ino_size, &token);
503 }
e02119d5 504 goto no_copy;
1a4bcf47 505 }
e02119d5
CM
506
507 if (overwrite_root &&
508 S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
509 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
510 save_old_i_size = 1;
511 saved_i_size = btrfs_inode_size(path->nodes[0],
512 dst_item);
513 }
514 }
515
516 copy_extent_buffer(path->nodes[0], eb, dst_ptr,
517 src_ptr, item_size);
518
519 if (save_old_i_size) {
520 struct btrfs_inode_item *dst_item;
521 dst_item = (struct btrfs_inode_item *)dst_ptr;
522 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
523 }
524
525 /* make sure the generation is filled in */
526 if (key->type == BTRFS_INODE_ITEM_KEY) {
527 struct btrfs_inode_item *dst_item;
528 dst_item = (struct btrfs_inode_item *)dst_ptr;
529 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
530 btrfs_set_inode_generation(path->nodes[0], dst_item,
531 trans->transid);
532 }
533 }
534no_copy:
535 btrfs_mark_buffer_dirty(path->nodes[0]);
b3b4aa74 536 btrfs_release_path(path);
e02119d5
CM
537 return 0;
538}
539
540/*
541 * simple helper to read an inode off the disk from a given root
542 * This can only be called for subvolume roots and not for the log
543 */
544static noinline struct inode *read_one_inode(struct btrfs_root *root,
545 u64 objectid)
546{
5d4f98a2 547 struct btrfs_key key;
e02119d5 548 struct inode *inode;
e02119d5 549
5d4f98a2
YZ
550 key.objectid = objectid;
551 key.type = BTRFS_INODE_ITEM_KEY;
552 key.offset = 0;
73f73415 553 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
5d4f98a2
YZ
554 if (IS_ERR(inode)) {
555 inode = NULL;
556 } else if (is_bad_inode(inode)) {
e02119d5
CM
557 iput(inode);
558 inode = NULL;
559 }
560 return inode;
561}
562
563/* replays a single extent in 'eb' at 'slot' with 'key' into the
564 * subvolume 'root'. path is released on entry and should be released
565 * on exit.
566 *
567 * extents in the log tree have not been allocated out of the extent
568 * tree yet. So, this completes the allocation, taking a reference
569 * as required if the extent already exists or creating a new extent
570 * if it isn't in the extent allocation tree yet.
571 *
572 * The extent is inserted into the file, dropping any existing extents
573 * from the file that overlap the new one.
574 */
575static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
576 struct btrfs_root *root,
577 struct btrfs_path *path,
578 struct extent_buffer *eb, int slot,
579 struct btrfs_key *key)
580{
581 int found_type;
e02119d5 582 u64 extent_end;
e02119d5 583 u64 start = key->offset;
4bc4bee4 584 u64 nbytes = 0;
e02119d5
CM
585 struct btrfs_file_extent_item *item;
586 struct inode *inode = NULL;
587 unsigned long size;
588 int ret = 0;
589
590 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
591 found_type = btrfs_file_extent_type(eb, item);
592
d899e052 593 if (found_type == BTRFS_FILE_EXTENT_REG ||
4bc4bee4
JB
594 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
595 nbytes = btrfs_file_extent_num_bytes(eb, item);
596 extent_end = start + nbytes;
597
598 /*
599 * We don't add to the inodes nbytes if we are prealloc or a
600 * hole.
601 */
602 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
603 nbytes = 0;
604 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
514ac8ad 605 size = btrfs_file_extent_inline_len(eb, slot, item);
4bc4bee4 606 nbytes = btrfs_file_extent_ram_bytes(eb, item);
fda2832f 607 extent_end = ALIGN(start + size, root->sectorsize);
e02119d5
CM
608 } else {
609 ret = 0;
610 goto out;
611 }
612
613 inode = read_one_inode(root, key->objectid);
614 if (!inode) {
615 ret = -EIO;
616 goto out;
617 }
618
619 /*
620 * first check to see if we already have this extent in the
621 * file. This must be done before the btrfs_drop_extents run
622 * so we don't try to drop this extent.
623 */
33345d01 624 ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
e02119d5
CM
625 start, 0);
626
d899e052
YZ
627 if (ret == 0 &&
628 (found_type == BTRFS_FILE_EXTENT_REG ||
629 found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
e02119d5
CM
630 struct btrfs_file_extent_item cmp1;
631 struct btrfs_file_extent_item cmp2;
632 struct btrfs_file_extent_item *existing;
633 struct extent_buffer *leaf;
634
635 leaf = path->nodes[0];
636 existing = btrfs_item_ptr(leaf, path->slots[0],
637 struct btrfs_file_extent_item);
638
639 read_extent_buffer(eb, &cmp1, (unsigned long)item,
640 sizeof(cmp1));
641 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
642 sizeof(cmp2));
643
644 /*
645 * we already have a pointer to this exact extent,
646 * we don't have to do anything
647 */
648 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
b3b4aa74 649 btrfs_release_path(path);
e02119d5
CM
650 goto out;
651 }
652 }
b3b4aa74 653 btrfs_release_path(path);
e02119d5
CM
654
655 /* drop any overlapping extents */
2671485d 656 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
3650860b
JB
657 if (ret)
658 goto out;
e02119d5 659
07d400a6
YZ
660 if (found_type == BTRFS_FILE_EXTENT_REG ||
661 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
5d4f98a2 662 u64 offset;
07d400a6
YZ
663 unsigned long dest_offset;
664 struct btrfs_key ins;
665
666 ret = btrfs_insert_empty_item(trans, root, path, key,
667 sizeof(*item));
3650860b
JB
668 if (ret)
669 goto out;
07d400a6
YZ
670 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
671 path->slots[0]);
672 copy_extent_buffer(path->nodes[0], eb, dest_offset,
673 (unsigned long)item, sizeof(*item));
674
675 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
676 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
677 ins.type = BTRFS_EXTENT_ITEM_KEY;
5d4f98a2 678 offset = key->offset - btrfs_file_extent_offset(eb, item);
07d400a6
YZ
679
680 if (ins.objectid > 0) {
681 u64 csum_start;
682 u64 csum_end;
683 LIST_HEAD(ordered_sums);
684 /*
685 * is this extent already allocated in the extent
686 * allocation tree? If so, just add a reference
687 */
1a4ed8fd 688 ret = btrfs_lookup_data_extent(root, ins.objectid,
07d400a6
YZ
689 ins.offset);
690 if (ret == 0) {
691 ret = btrfs_inc_extent_ref(trans, root,
692 ins.objectid, ins.offset,
5d4f98a2 693 0, root->root_key.objectid,
66d7e7f0 694 key->objectid, offset, 0);
b50c6e25
JB
695 if (ret)
696 goto out;
07d400a6
YZ
697 } else {
698 /*
699 * insert the extent pointer in the extent
700 * allocation tree
701 */
5d4f98a2
YZ
702 ret = btrfs_alloc_logged_file_extent(trans,
703 root, root->root_key.objectid,
704 key->objectid, offset, &ins);
b50c6e25
JB
705 if (ret)
706 goto out;
07d400a6 707 }
b3b4aa74 708 btrfs_release_path(path);
07d400a6
YZ
709
710 if (btrfs_file_extent_compression(eb, item)) {
711 csum_start = ins.objectid;
712 csum_end = csum_start + ins.offset;
713 } else {
714 csum_start = ins.objectid +
715 btrfs_file_extent_offset(eb, item);
716 csum_end = csum_start +
717 btrfs_file_extent_num_bytes(eb, item);
718 }
719
720 ret = btrfs_lookup_csums_range(root->log_root,
721 csum_start, csum_end - 1,
a2de733c 722 &ordered_sums, 0);
3650860b
JB
723 if (ret)
724 goto out;
07d400a6
YZ
725 while (!list_empty(&ordered_sums)) {
726 struct btrfs_ordered_sum *sums;
727 sums = list_entry(ordered_sums.next,
728 struct btrfs_ordered_sum,
729 list);
3650860b
JB
730 if (!ret)
731 ret = btrfs_csum_file_blocks(trans,
07d400a6
YZ
732 root->fs_info->csum_root,
733 sums);
07d400a6
YZ
734 list_del(&sums->list);
735 kfree(sums);
736 }
3650860b
JB
737 if (ret)
738 goto out;
07d400a6 739 } else {
b3b4aa74 740 btrfs_release_path(path);
07d400a6
YZ
741 }
742 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
743 /* inline extents are easy, we just overwrite them */
744 ret = overwrite_item(trans, root, path, eb, slot, key);
3650860b
JB
745 if (ret)
746 goto out;
07d400a6 747 }
e02119d5 748
4bc4bee4 749 inode_add_bytes(inode, nbytes);
b9959295 750 ret = btrfs_update_inode(trans, root, inode);
e02119d5
CM
751out:
752 if (inode)
753 iput(inode);
754 return ret;
755}
756
757/*
758 * when cleaning up conflicts between the directory names in the
759 * subvolume, directory names in the log and directory names in the
760 * inode back references, we may have to unlink inodes from directories.
761 *
762 * This is a helper function to do the unlink of a specific directory
763 * item
764 */
765static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
766 struct btrfs_root *root,
767 struct btrfs_path *path,
768 struct inode *dir,
769 struct btrfs_dir_item *di)
770{
771 struct inode *inode;
772 char *name;
773 int name_len;
774 struct extent_buffer *leaf;
775 struct btrfs_key location;
776 int ret;
777
778 leaf = path->nodes[0];
779
780 btrfs_dir_item_key_to_cpu(leaf, di, &location);
781 name_len = btrfs_dir_name_len(leaf, di);
782 name = kmalloc(name_len, GFP_NOFS);
2a29edc6 783 if (!name)
784 return -ENOMEM;
785
e02119d5 786 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
b3b4aa74 787 btrfs_release_path(path);
e02119d5
CM
788
789 inode = read_one_inode(root, location.objectid);
c00e9493 790 if (!inode) {
3650860b
JB
791 ret = -EIO;
792 goto out;
c00e9493 793 }
e02119d5 794
ec051c0f 795 ret = link_to_fixup_dir(trans, root, path, location.objectid);
3650860b
JB
796 if (ret)
797 goto out;
12fcfd22 798
e02119d5 799 ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
3650860b
JB
800 if (ret)
801 goto out;
ada9af21
FDBM
802 else
803 ret = btrfs_run_delayed_items(trans, root);
3650860b 804out:
e02119d5 805 kfree(name);
e02119d5
CM
806 iput(inode);
807 return ret;
808}
809
810/*
811 * helper function to see if a given name and sequence number found
812 * in an inode back reference are already in a directory and correctly
813 * point to this inode
814 */
815static noinline int inode_in_dir(struct btrfs_root *root,
816 struct btrfs_path *path,
817 u64 dirid, u64 objectid, u64 index,
818 const char *name, int name_len)
819{
820 struct btrfs_dir_item *di;
821 struct btrfs_key location;
822 int match = 0;
823
824 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
825 index, name, name_len, 0);
826 if (di && !IS_ERR(di)) {
827 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
828 if (location.objectid != objectid)
829 goto out;
830 } else
831 goto out;
b3b4aa74 832 btrfs_release_path(path);
e02119d5
CM
833
834 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
835 if (di && !IS_ERR(di)) {
836 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
837 if (location.objectid != objectid)
838 goto out;
839 } else
840 goto out;
841 match = 1;
842out:
b3b4aa74 843 btrfs_release_path(path);
e02119d5
CM
844 return match;
845}
846
847/*
848 * helper function to check a log tree for a named back reference in
849 * an inode. This is used to decide if a back reference that is
850 * found in the subvolume conflicts with what we find in the log.
851 *
852 * inode backreferences may have multiple refs in a single item,
853 * during replay we process one reference at a time, and we don't
854 * want to delete valid links to a file from the subvolume if that
855 * link is also in the log.
856 */
857static noinline int backref_in_log(struct btrfs_root *log,
858 struct btrfs_key *key,
f186373f 859 u64 ref_objectid,
df8d116f 860 const char *name, int namelen)
e02119d5
CM
861{
862 struct btrfs_path *path;
863 struct btrfs_inode_ref *ref;
864 unsigned long ptr;
865 unsigned long ptr_end;
866 unsigned long name_ptr;
867 int found_name_len;
868 int item_size;
869 int ret;
870 int match = 0;
871
872 path = btrfs_alloc_path();
2a29edc6 873 if (!path)
874 return -ENOMEM;
875
e02119d5
CM
876 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
877 if (ret != 0)
878 goto out;
879
e02119d5 880 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
f186373f
MF
881
882 if (key->type == BTRFS_INODE_EXTREF_KEY) {
883 if (btrfs_find_name_in_ext_backref(path, ref_objectid,
884 name, namelen, NULL))
885 match = 1;
886
887 goto out;
888 }
889
890 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
e02119d5
CM
891 ptr_end = ptr + item_size;
892 while (ptr < ptr_end) {
893 ref = (struct btrfs_inode_ref *)ptr;
894 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
895 if (found_name_len == namelen) {
896 name_ptr = (unsigned long)(ref + 1);
897 ret = memcmp_extent_buffer(path->nodes[0], name,
898 name_ptr, namelen);
899 if (ret == 0) {
900 match = 1;
901 goto out;
902 }
903 }
904 ptr = (unsigned long)(ref + 1) + found_name_len;
905 }
906out:
907 btrfs_free_path(path);
908 return match;
909}
910
5a1d7843 911static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
e02119d5 912 struct btrfs_root *root,
e02119d5 913 struct btrfs_path *path,
5a1d7843
JS
914 struct btrfs_root *log_root,
915 struct inode *dir, struct inode *inode,
5a1d7843 916 struct extent_buffer *eb,
f186373f
MF
917 u64 inode_objectid, u64 parent_objectid,
918 u64 ref_index, char *name, int namelen,
919 int *search_done)
e02119d5 920{
34f3e4f2 921 int ret;
f186373f
MF
922 char *victim_name;
923 int victim_name_len;
924 struct extent_buffer *leaf;
5a1d7843 925 struct btrfs_dir_item *di;
f186373f
MF
926 struct btrfs_key search_key;
927 struct btrfs_inode_extref *extref;
c622ae60 928
f186373f
MF
929again:
930 /* Search old style refs */
931 search_key.objectid = inode_objectid;
932 search_key.type = BTRFS_INODE_REF_KEY;
933 search_key.offset = parent_objectid;
934 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
e02119d5 935 if (ret == 0) {
e02119d5
CM
936 struct btrfs_inode_ref *victim_ref;
937 unsigned long ptr;
938 unsigned long ptr_end;
f186373f
MF
939
940 leaf = path->nodes[0];
e02119d5
CM
941
942 /* are we trying to overwrite a back ref for the root directory
943 * if so, just jump out, we're done
944 */
f186373f 945 if (search_key.objectid == search_key.offset)
5a1d7843 946 return 1;
e02119d5
CM
947
948 /* check all the names in this back reference to see
949 * if they are in the log. if so, we allow them to stay
950 * otherwise they must be unlinked as a conflict
951 */
952 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
953 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
d397712b 954 while (ptr < ptr_end) {
e02119d5
CM
955 victim_ref = (struct btrfs_inode_ref *)ptr;
956 victim_name_len = btrfs_inode_ref_name_len(leaf,
957 victim_ref);
958 victim_name = kmalloc(victim_name_len, GFP_NOFS);
3650860b
JB
959 if (!victim_name)
960 return -ENOMEM;
e02119d5
CM
961
962 read_extent_buffer(leaf, victim_name,
963 (unsigned long)(victim_ref + 1),
964 victim_name_len);
965
f186373f
MF
966 if (!backref_in_log(log_root, &search_key,
967 parent_objectid,
968 victim_name,
e02119d5 969 victim_name_len)) {
8b558c5f 970 inc_nlink(inode);
b3b4aa74 971 btrfs_release_path(path);
12fcfd22 972
e02119d5
CM
973 ret = btrfs_unlink_inode(trans, root, dir,
974 inode, victim_name,
975 victim_name_len);
f186373f 976 kfree(victim_name);
3650860b
JB
977 if (ret)
978 return ret;
ada9af21
FDBM
979 ret = btrfs_run_delayed_items(trans, root);
980 if (ret)
981 return ret;
f186373f
MF
982 *search_done = 1;
983 goto again;
e02119d5
CM
984 }
985 kfree(victim_name);
f186373f 986
e02119d5
CM
987 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
988 }
e02119d5 989
c622ae60 990 /*
991 * NOTE: we have searched root tree and checked the
992 * coresponding ref, it does not need to check again.
993 */
5a1d7843 994 *search_done = 1;
e02119d5 995 }
b3b4aa74 996 btrfs_release_path(path);
e02119d5 997
f186373f
MF
998 /* Same search but for extended refs */
999 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
1000 inode_objectid, parent_objectid, 0,
1001 0);
1002 if (!IS_ERR_OR_NULL(extref)) {
1003 u32 item_size;
1004 u32 cur_offset = 0;
1005 unsigned long base;
1006 struct inode *victim_parent;
1007
1008 leaf = path->nodes[0];
1009
1010 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1011 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
1012
1013 while (cur_offset < item_size) {
dd9ef135 1014 extref = (struct btrfs_inode_extref *)(base + cur_offset);
f186373f
MF
1015
1016 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
1017
1018 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
1019 goto next;
1020
1021 victim_name = kmalloc(victim_name_len, GFP_NOFS);
3650860b
JB
1022 if (!victim_name)
1023 return -ENOMEM;
f186373f
MF
1024 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
1025 victim_name_len);
1026
1027 search_key.objectid = inode_objectid;
1028 search_key.type = BTRFS_INODE_EXTREF_KEY;
1029 search_key.offset = btrfs_extref_hash(parent_objectid,
1030 victim_name,
1031 victim_name_len);
1032 ret = 0;
1033 if (!backref_in_log(log_root, &search_key,
1034 parent_objectid, victim_name,
1035 victim_name_len)) {
1036 ret = -ENOENT;
1037 victim_parent = read_one_inode(root,
1038 parent_objectid);
1039 if (victim_parent) {
8b558c5f 1040 inc_nlink(inode);
f186373f
MF
1041 btrfs_release_path(path);
1042
1043 ret = btrfs_unlink_inode(trans, root,
1044 victim_parent,
1045 inode,
1046 victim_name,
1047 victim_name_len);
ada9af21
FDBM
1048 if (!ret)
1049 ret = btrfs_run_delayed_items(
1050 trans, root);
f186373f 1051 }
f186373f
MF
1052 iput(victim_parent);
1053 kfree(victim_name);
3650860b
JB
1054 if (ret)
1055 return ret;
f186373f
MF
1056 *search_done = 1;
1057 goto again;
1058 }
1059 kfree(victim_name);
3650860b
JB
1060 if (ret)
1061 return ret;
f186373f
MF
1062next:
1063 cur_offset += victim_name_len + sizeof(*extref);
1064 }
1065 *search_done = 1;
1066 }
1067 btrfs_release_path(path);
1068
34f3e4f2 1069 /* look for a conflicting sequence number */
1070 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
f186373f 1071 ref_index, name, namelen, 0);
34f3e4f2 1072 if (di && !IS_ERR(di)) {
1073 ret = drop_one_dir_item(trans, root, path, dir, di);
3650860b
JB
1074 if (ret)
1075 return ret;
34f3e4f2 1076 }
1077 btrfs_release_path(path);
1078
1079 /* look for a conflicing name */
1080 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
1081 name, namelen, 0);
1082 if (di && !IS_ERR(di)) {
1083 ret = drop_one_dir_item(trans, root, path, dir, di);
3650860b
JB
1084 if (ret)
1085 return ret;
34f3e4f2 1086 }
1087 btrfs_release_path(path);
1088
5a1d7843
JS
1089 return 0;
1090}
e02119d5 1091
f186373f
MF
1092static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1093 u32 *namelen, char **name, u64 *index,
1094 u64 *parent_objectid)
1095{
1096 struct btrfs_inode_extref *extref;
1097
1098 extref = (struct btrfs_inode_extref *)ref_ptr;
1099
1100 *namelen = btrfs_inode_extref_name_len(eb, extref);
1101 *name = kmalloc(*namelen, GFP_NOFS);
1102 if (*name == NULL)
1103 return -ENOMEM;
1104
1105 read_extent_buffer(eb, *name, (unsigned long)&extref->name,
1106 *namelen);
1107
1108 *index = btrfs_inode_extref_index(eb, extref);
1109 if (parent_objectid)
1110 *parent_objectid = btrfs_inode_extref_parent(eb, extref);
1111
1112 return 0;
1113}
1114
1115static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1116 u32 *namelen, char **name, u64 *index)
1117{
1118 struct btrfs_inode_ref *ref;
1119
1120 ref = (struct btrfs_inode_ref *)ref_ptr;
1121
1122 *namelen = btrfs_inode_ref_name_len(eb, ref);
1123 *name = kmalloc(*namelen, GFP_NOFS);
1124 if (*name == NULL)
1125 return -ENOMEM;
1126
1127 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
1128
1129 *index = btrfs_inode_ref_index(eb, ref);
1130
1131 return 0;
1132}
1133
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function. (it should be released on return).
 *
 * The item may hold either old-style refs (BTRFS_INODE_REF_KEY) or
 * extended refs (BTRFS_INODE_EXTREF_KEY); both are arrays of
 * (struct header + inline name) entries and are walked with the same loop.
 * Returns 0 on success or a negative errno.
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	/*
	 * An extref embeds its own parent objectid; for old-style refs the
	 * parent is the item key's offset.
	 */
	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode. If we don't find the dir, just don't
	 * copy the back ref in. The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     &ref_index);
		}
		if (ret)
			goto out;

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
				  ref_index, name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link. Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */

			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      dir, inode, eb,
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				/* ret == 1 means "root dir backref", not an error */
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/* insert our name */
			ret = btrfs_add_link(trans, dir, inode, name, namelen,
					     0, ref_index);
			if (ret)
				goto out;

			btrfs_update_inode(trans, root, inode);
		}

		/* advance past the fixed header plus the inline name */
		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		/* extref entries may each name a different parent dir */
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}
1268
c71bf099 1269static int insert_orphan_item(struct btrfs_trans_handle *trans,
9c4f61f0 1270 struct btrfs_root *root, u64 ino)
c71bf099
YZ
1271{
1272 int ret;
381cf658 1273
9c4f61f0
DS
1274 ret = btrfs_insert_orphan_item(trans, root, ino);
1275 if (ret == -EEXIST)
1276 ret = 0;
381cf658 1277
c71bf099
YZ
1278 return ret;
1279}
1280
/*
 * Count how many extended-ref links exist for @inode by walking every
 * BTRFS_INODE_EXTREF item with btrfs_find_one_extref() and summing the
 * name entries packed inside each item.
 *
 * Returns the link count (>= 0) or a negative errno.  -ENOENT from the
 * search simply means "no more extref items" and is treated as success.
 */
static int count_inode_extrefs(struct btrfs_root *root,
			       struct inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		/* each extref item packs several (header + name) entries */
		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}
1324
/*
 * Count how many old-style (BTRFS_INODE_REF) links exist for @inode.
 *
 * The tree is walked backwards from key offset (u64)-1 so every ref item
 * for this inode is visited; each item may pack several names, all counted.
 * Returns the link count.
 *
 * NOTE(review): a failure from btrfs_search_slot() (ret < 0) just breaks
 * out of the loop and the partial nlink is still returned — errors are not
 * propagated here; confirm this is intentional.
 */
static int count_inode_refs(struct btrfs_root *root,
			    struct inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			/* exact key not found: step back to the previous slot */
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		/* one ref item can hold several names; count each */
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		/* try the previous slot in the same leaf before re-searching */
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}
1381
/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay. So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found. If it goes down to zero, the iput
 * will free the inode.
 *
 * Returns 0 on success or a negative errno.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* total link count = old-style refs + extended refs */
	ret = count_inode_refs(root, inode, path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, inode, path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	/* force the next dir-index allocation to re-read from disk */
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			/* empty the directory before orphaning it */
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = insert_orphan_item(trans, root, ino);
	}

out:
	btrfs_free_path(path);
	return ret;
}
1439
/*
 * Walk all ORPHAN_ITEM entries recorded under BTRFS_TREE_LOG_FIXUP_OBJECTID,
 * delete each marker and recompute the link count of the inode it names
 * via fixup_inode_link_count().
 *
 * Returns 0 on success or a negative errno.
 */
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			/* not found: look at the item just before the slot */
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* remove the fixup marker before processing the inode */
		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;

		btrfs_release_path(path);
		/* key.offset of the marker is the inode number to fix */
		inode = read_one_inode(root, key.offset);
		if (!inode)
			return -EIO;

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			goto out;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highset possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
1493
1494
1495/*
1496 * record a given inode in the fixup dir so we can check its link
1497 * count when replay is done. The link count is incremented here
1498 * so the inode won't go away until we check it
1499 */
1500static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1501 struct btrfs_root *root,
1502 struct btrfs_path *path,
1503 u64 objectid)
1504{
1505 struct btrfs_key key;
1506 int ret = 0;
1507 struct inode *inode;
1508
1509 inode = read_one_inode(root, objectid);
c00e9493
TI
1510 if (!inode)
1511 return -EIO;
e02119d5
CM
1512
1513 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
962a298f 1514 key.type = BTRFS_ORPHAN_ITEM_KEY;
e02119d5
CM
1515 key.offset = objectid;
1516
1517 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1518
b3b4aa74 1519 btrfs_release_path(path);
e02119d5 1520 if (ret == 0) {
9bf7a489
JB
1521 if (!inode->i_nlink)
1522 set_nlink(inode, 1);
1523 else
8b558c5f 1524 inc_nlink(inode);
b9959295 1525 ret = btrfs_update_inode(trans, root, inode);
e02119d5
CM
1526 } else if (ret == -EEXIST) {
1527 ret = 0;
1528 } else {
3650860b 1529 BUG(); /* Logic Error */
e02119d5
CM
1530 }
1531 iput(inode);
1532
1533 return ret;
1534}
1535
1536/*
1537 * when replaying the log for a directory, we only insert names
1538 * for inodes that actually exist. This means an fsync on a directory
1539 * does not implicitly fsync all the new files in it
1540 */
1541static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1542 struct btrfs_root *root,
e02119d5 1543 u64 dirid, u64 index,
60d53eb3 1544 char *name, int name_len,
e02119d5
CM
1545 struct btrfs_key *location)
1546{
1547 struct inode *inode;
1548 struct inode *dir;
1549 int ret;
1550
1551 inode = read_one_inode(root, location->objectid);
1552 if (!inode)
1553 return -ENOENT;
1554
1555 dir = read_one_inode(root, dirid);
1556 if (!dir) {
1557 iput(inode);
1558 return -EIO;
1559 }
d555438b 1560
e02119d5
CM
1561 ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
1562
1563 /* FIXME, put inode into FIXUP list */
1564
1565 iput(inode);
1566 iput(dir);
1567 return ret;
1568}
1569
df8d116f
FM
1570/*
1571 * Return true if an inode reference exists in the log for the given name,
1572 * inode and parent inode.
1573 */
1574static bool name_in_log_ref(struct btrfs_root *log_root,
1575 const char *name, const int name_len,
1576 const u64 dirid, const u64 ino)
1577{
1578 struct btrfs_key search_key;
1579
1580 search_key.objectid = ino;
1581 search_key.type = BTRFS_INODE_REF_KEY;
1582 search_key.offset = dirid;
1583 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1584 return true;
1585
1586 search_key.type = BTRFS_INODE_EXTREF_KEY;
1587 search_key.offset = btrfs_extref_hash(dirid, name, name_len);
1588 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1589 return true;
1590
1591 return false;
1592}
1593
e02119d5
CM
1594/*
1595 * take a single entry in a log directory item and replay it into
1596 * the subvolume.
1597 *
1598 * if a conflicting item exists in the subdirectory already,
1599 * the inode it points to is unlinked and put into the link count
1600 * fix up tree.
1601 *
1602 * If a name from the log points to a file or directory that does
1603 * not exist in the FS, it is skipped. fsyncs on directories
1604 * do not force down inodes inside that directory, just changes to the
1605 * names or unlinks in a directory.
bb53eda9
FM
1606 *
1607 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
1608 * non-existing inode) and 1 if the name was replayed.
e02119d5
CM
1609 */
1610static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1611 struct btrfs_root *root,
1612 struct btrfs_path *path,
1613 struct extent_buffer *eb,
1614 struct btrfs_dir_item *di,
1615 struct btrfs_key *key)
1616{
1617 char *name;
1618 int name_len;
1619 struct btrfs_dir_item *dst_di;
1620 struct btrfs_key found_key;
1621 struct btrfs_key log_key;
1622 struct inode *dir;
e02119d5 1623 u8 log_type;
4bef0848 1624 int exists;
3650860b 1625 int ret = 0;
d555438b 1626 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
bb53eda9 1627 bool name_added = false;
e02119d5
CM
1628
1629 dir = read_one_inode(root, key->objectid);
c00e9493
TI
1630 if (!dir)
1631 return -EIO;
e02119d5
CM
1632
1633 name_len = btrfs_dir_name_len(eb, di);
1634 name = kmalloc(name_len, GFP_NOFS);
2bac325e
FDBM
1635 if (!name) {
1636 ret = -ENOMEM;
1637 goto out;
1638 }
2a29edc6 1639
e02119d5
CM
1640 log_type = btrfs_dir_type(eb, di);
1641 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1642 name_len);
1643
1644 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
4bef0848
CM
1645 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1646 if (exists == 0)
1647 exists = 1;
1648 else
1649 exists = 0;
b3b4aa74 1650 btrfs_release_path(path);
4bef0848 1651
e02119d5
CM
1652 if (key->type == BTRFS_DIR_ITEM_KEY) {
1653 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1654 name, name_len, 1);
d397712b 1655 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
e02119d5
CM
1656 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1657 key->objectid,
1658 key->offset, name,
1659 name_len, 1);
1660 } else {
3650860b
JB
1661 /* Corruption */
1662 ret = -EINVAL;
1663 goto out;
e02119d5 1664 }
c704005d 1665 if (IS_ERR_OR_NULL(dst_di)) {
e02119d5
CM
1666 /* we need a sequence number to insert, so we only
1667 * do inserts for the BTRFS_DIR_INDEX_KEY types
1668 */
1669 if (key->type != BTRFS_DIR_INDEX_KEY)
1670 goto out;
1671 goto insert;
1672 }
1673
1674 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1675 /* the existing item matches the logged item */
1676 if (found_key.objectid == log_key.objectid &&
1677 found_key.type == log_key.type &&
1678 found_key.offset == log_key.offset &&
1679 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
a2cc11db 1680 update_size = false;
e02119d5
CM
1681 goto out;
1682 }
1683
1684 /*
1685 * don't drop the conflicting directory entry if the inode
1686 * for the new entry doesn't exist
1687 */
4bef0848 1688 if (!exists)
e02119d5
CM
1689 goto out;
1690
e02119d5 1691 ret = drop_one_dir_item(trans, root, path, dir, dst_di);
3650860b
JB
1692 if (ret)
1693 goto out;
e02119d5
CM
1694
1695 if (key->type == BTRFS_DIR_INDEX_KEY)
1696 goto insert;
1697out:
b3b4aa74 1698 btrfs_release_path(path);
d555438b
JB
1699 if (!ret && update_size) {
1700 btrfs_i_size_write(dir, dir->i_size + name_len * 2);
1701 ret = btrfs_update_inode(trans, root, dir);
1702 }
e02119d5
CM
1703 kfree(name);
1704 iput(dir);
bb53eda9
FM
1705 if (!ret && name_added)
1706 ret = 1;
3650860b 1707 return ret;
e02119d5
CM
1708
1709insert:
df8d116f
FM
1710 if (name_in_log_ref(root->log_root, name, name_len,
1711 key->objectid, log_key.objectid)) {
1712 /* The dentry will be added later. */
1713 ret = 0;
1714 update_size = false;
1715 goto out;
1716 }
b3b4aa74 1717 btrfs_release_path(path);
60d53eb3
Z
1718 ret = insert_one_name(trans, root, key->objectid, key->offset,
1719 name, name_len, &log_key);
df8d116f 1720 if (ret && ret != -ENOENT && ret != -EEXIST)
3650860b 1721 goto out;
bb53eda9
FM
1722 if (!ret)
1723 name_added = true;
d555438b 1724 update_size = false;
3650860b 1725 ret = 0;
e02119d5
CM
1726 goto out;
1727}
1728
/*
 * find all the names in a directory item and reconcile them into
 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 *
 * Returns 0 on success or a negative errno.
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	int ret = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	struct btrfs_path *fixup_path = NULL;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di))
			return -EIO;
		name_len = btrfs_dir_name_len(eb, di);
		/* ret == 1 here means the name was actually replayed */
		ret = replay_one_name(trans, root, path, eb, di, key);
		if (ret < 0)
			break;
		ptr = (unsigned long)(di + 1);
		ptr += name_len;

		/*
		 * If this entry refers to a non-directory (directories can not
		 * have a link count > 1) and it was added in the transaction
		 * that was not committed, make sure we fixup the link count of
		 * the inode it the entry points to. Otherwise something like
		 * the following would result in a directory pointing to an
		 * inode with a wrong link that does not account for this dir
		 * entry:
		 *
		 * mkdir testdir
		 * touch testdir/foo
		 * touch testdir/bar
		 * sync
		 *
		 * ln testdir/bar testdir/bar_link
		 * ln testdir/foo testdir/foo_link
		 * xfs_io -c "fsync" testdir/bar
		 *
		 * <power failure>
		 *
		 * mount fs, log replay happens
		 *
		 * File foo would remain with a link count of 1 when it has two
		 * entries pointing to it in the directory testdir. This would
		 * make it impossible to ever delete the parent directory has
		 * it would result in stale dentries that can never be deleted.
		 */
		if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
			struct btrfs_key di_key;

			/* fixup_path is allocated lazily, only when needed */
			if (!fixup_path) {
				fixup_path = btrfs_alloc_path();
				if (!fixup_path) {
					ret = -ENOMEM;
					break;
				}
			}

			btrfs_dir_item_key_to_cpu(eb, di, &di_key);
			ret = link_to_fixup_dir(trans, root, fixup_path,
						di_key.objectid);
			if (ret)
				break;
		}
		ret = 0;
	}
	btrfs_free_path(fixup_path);
	return ret;
}
1811
/*
 * directory replay has two parts. There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for. During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 *
 * Returns 0 when a range covering *start_ret was found (and *start_ret /
 * *end_ret are updated to its bounds), 1 when there is no such range, or
 * a negative errno on search failure.
 */
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	/* (u64)-1 is the "done" sentinel set by a previous call */
	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}
	if (ret != 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	/* range items cover [key.offset, found_end] */
	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	} else {
		path->slots[0]++;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
1894
/*
 * this looks for a given directory item in the log. If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 *
 * @path points at the subvolume dir item being checked; @log_path is
 * scratch for log lookups.  Both paths are released before returning
 * through the normal exit; see NOTE below for the early-return cases.
 *
 * Returns 0 on success or a negative errno.
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
{
	int ret;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	char *name;
	struct inode *inode;
	struct btrfs_key location;

again:
	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di)) {
			ret = -EIO;
			goto out;
		}

		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		read_extent_buffer(eb, name, (unsigned long)(di + 1),
				   name_len);
		log_di = NULL;
		/* log == NULL means nothing is authoritative: drop everything */
		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
						       dir_key->objectid,
						       name, name_len, 0);
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
							     log_path,
							     dir_key->objectid,
							     dir_key->offset,
							     name, name_len, 0);
		}
		if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
			/* name missing from the log: unlink it from the dir */
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(path);
			btrfs_release_path(log_path);
			inode = read_one_inode(root, location.objectid);
			/*
			 * NOTE(review): the early returns below skip the
			 * out-label path releases; both paths were already
			 * released just above, so this looks safe — confirm.
			 */
			if (!inode) {
				kfree(name);
				return -EIO;
			}

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);
			if (ret) {
				kfree(name);
				iput(inode);
				goto out;
			}

			/* pin the inode so the unlink can't free it yet */
			inc_nlink(inode);
			ret = btrfs_unlink_inode(trans, root, dir, inode,
						 name, name_len);
			if (!ret)
				ret = btrfs_run_delayed_items(trans, root);
			kfree(name);
			iput(inode);
			if (ret)
				goto out;

			/* there might still be more names under this key
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
						0, 0);
			if (ret == 0)
				goto again;
			ret = 0;
			goto out;
		} else if (IS_ERR(log_di)) {
			kfree(name);
			return PTR_ERR(log_di);
		}
		btrfs_release_path(log_path);
		kfree(name);

		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	btrfs_release_path(log_path);
	return ret;
}
2007
/*
 * Delete every xattr of @ino that exists in the subvolume but is absent
 * from the log tree.  The log is authoritative for the inode's xattrs, so
 * anything not found there was removed before the fsync.
 *
 * Returns 0 on success or a negative errno.
 */
static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_root *log,
			       struct btrfs_path *path,
			       const u64 ino)
{
	struct btrfs_key search_key;
	struct btrfs_path *log_path;
	int i;
	int nritems;
	int ret;

	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	search_key.objectid = ino;
	search_key.type = BTRFS_XATTR_ITEM_KEY;
	search_key.offset = 0;
again:
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto out;
process_leaf:
	nritems = btrfs_header_nritems(path->nodes[0]);
	for (i = path->slots[0]; i < nritems; i++) {
		struct btrfs_key key;
		struct btrfs_dir_item *di;
		struct btrfs_dir_item *log_di;
		u32 total_size;
		u32 cur;

		btrfs_item_key_to_cpu(path->nodes[0], &key, i);
		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
			ret = 0;
			goto out;
		}

		di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
		total_size = btrfs_item_size_nr(path->nodes[0], i);
		cur = 0;
		/* one xattr item can pack several (name, data) entries */
		while (cur < total_size) {
			u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
			u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
			u32 this_len = sizeof(*di) + name_len + data_len;
			char *name;

			name = kmalloc(name_len, GFP_NOFS);
			if (!name) {
				ret = -ENOMEM;
				goto out;
			}
			read_extent_buffer(path->nodes[0], name,
					   (unsigned long)(di + 1), name_len);

			log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
						    name, name_len, 0);
			btrfs_release_path(log_path);
			if (!log_di) {
				/* Doesn't exist in log tree, so delete it. */
				btrfs_release_path(path);
				di = btrfs_lookup_xattr(trans, root, path, ino,
							name, name_len, -1);
				kfree(name);
				if (IS_ERR(di)) {
					ret = PTR_ERR(di);
					goto out;
				}
				ASSERT(di);
				ret = btrfs_delete_one_dir_name(trans, root,
								path, di);
				if (ret)
					goto out;
				btrfs_release_path(path);
				/* the leaf changed: restart from this key */
				search_key = key;
				goto again;
			}
			kfree(name);
			if (IS_ERR(log_di)) {
				ret = PTR_ERR(log_di);
				goto out;
			}
			cur += this_len;
			di = (struct btrfs_dir_item *)((char *)di + this_len);
		}
	}
	ret = btrfs_next_leaf(root, path);
	if (ret > 0)
		ret = 0;
	else if (ret == 0)
		goto process_leaf;
out:
	btrfs_free_path(log_path);
	btrfs_release_path(path);
	return ret;
}
2104
2105
e02119d5
CM
2106/*
2107 * deletion replay happens before we copy any new directory items
2108 * out of the log or out of backreferences from inodes. It
2109 * scans the log to find ranges of keys that log is authoritative for,
2110 * and then scans the directory to find items in those ranges that are
2111 * not present in the log.
2112 *
2113 * Anything we don't find in the log is unlinked and removed from the
2114 * directory.
2115 */
/*
 * Remove, from directory 'dirid' in the subvolume, any entry that falls in a
 * key range the log is authoritative for but is absent from the log (see the
 * comment above). Runs once for DIR_ITEM keys and once for DIR_INDEX keys.
 *
 * @del_all: when non-zero, treat the whole key space as authoritative and
 *	     delete everything not found in the log.
 *
 * Returns 0 on success or a negative errno.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
{
	u64 range_start;
	u64 range_end;
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	int ret = 0;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;
	struct inode *dir;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 * from the log
	 */
	if (!dir) {
		btrfs_free_path(log_path);
		return 0;
	}
again:
	range_start = 0;
	range_end = 0;
	while (1) {
		/*
		 * Find the next key range [range_start, range_end] that the
		 * log claims to be authoritative for (or everything when
		 * del_all is set).
		 */
		if (del_all)
			range_end = (u64)-1;
		else {
			ret = find_dir_range(log, path, dirid, key_type,
					     &range_start, &range_end);
			if (ret != 0)
				break;
		}

		dir_key.offset = range_start;
		while (1) {
			int nritems;
			/* Scan the subvolume directory within the range. */
			ret = btrfs_search_slot(NULL, root, &dir_key, path,
						0, 0);
			if (ret < 0)
				goto out;

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);
			/* Ran off this directory's items of this key type. */
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)
				goto next_type;

			if (found_key.offset > range_end)
				break;

			/* Unlink the entry if the log doesn't have it. */
			ret = check_item_in_log(trans, root, log, path,
						log_path, dir,
						&found_key);
			if (ret)
				goto out;
			if (found_key.offset == (u64)-1)
				break;
			dir_key.offset = found_key.offset + 1;
		}
		btrfs_release_path(path);
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}

next_type:
	ret = 0;
	/* Repeat the whole scan for the index-keyed directory entries. */
	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(path);
		goto again;
	}
out:
	btrfs_release_path(path);
	btrfs_free_path(log_path);
	iput(dir);
	return ret;
}
2211
2212/*
2213 * the process_func used to replay items from the log tree. This
2214 * gets called in two different stages. The first stage just looks
2215 * for inodes and makes sure they are all copied into the subvolume.
2216 *
2217 * The second stage copies all the other item types from the log into
2218 * the subvolume. The two stage approach is slower, but gets rid of
2219 * lots of complexity around inodes referencing other inodes that exist
2220 * only in the log (references come from either directory items or inode
2221 * back refs).
2222 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen)
{
	int nritems;
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;
	int level;
	int i;
	int ret;

	/* Make sure the buffer contents are valid for generation 'gen'. */
	ret = btrfs_read_buffer(eb, gen);
	if (ret)
		return ret;

	level = btrfs_header_level(eb);

	/* Only leaves carry items to replay; interior nodes are skipped. */
	if (level != 0)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;
			u32 mode;

			inode_item = btrfs_item_ptr(eb, i,
					    struct btrfs_inode_item);
			/* Drop xattrs the log says were deleted. */
			ret = replay_xattr_deletes(wc->trans, root, log,
						   path, key.objectid);
			if (ret)
				break;
			mode = btrfs_inode_mode(eb, inode_item);
			/* For directories, also drop deleted entries. */
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid, 0);
				if (ret)
					break;
			}
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;

			/* for regular files, make sure corresponding
			 * orphan item exists. extents past the new EOF
			 * will be truncated later by orphan cleanup.
			 */
			if (S_ISREG(mode)) {
				ret = insert_orphan_item(wc->trans, root,
							 key.objectid);
				if (ret)
					break;
			}

			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);
			if (ret)
				break;
		}

		/* Directory index entries get their own replay stage. */
		if (key.type == BTRFS_DIR_INDEX_KEY &&
		    wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}

		if (wc->stage < LOG_WALK_REPLAY_ALL)
			continue;

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_INODE_REF_KEY ||
			   key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			/* -ENOENT here means the ref was deleted later on. */
			if (ret && ret != -ENOENT)
				break;
			ret = 0;
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
						eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_DIR_ITEM_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}
	}
	btrfs_free_path(path);
	return ret;
}
2331
d397712b 2332static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
e02119d5
CM
2333 struct btrfs_root *root,
2334 struct btrfs_path *path, int *level,
2335 struct walk_control *wc)
2336{
2337 u64 root_owner;
e02119d5
CM
2338 u64 bytenr;
2339 u64 ptr_gen;
2340 struct extent_buffer *next;
2341 struct extent_buffer *cur;
2342 struct extent_buffer *parent;
2343 u32 blocksize;
2344 int ret = 0;
2345
2346 WARN_ON(*level < 0);
2347 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2348
d397712b 2349 while (*level > 0) {
e02119d5
CM
2350 WARN_ON(*level < 0);
2351 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2352 cur = path->nodes[*level];
2353
fae7f21c 2354 WARN_ON(btrfs_header_level(cur) != *level);
e02119d5
CM
2355
2356 if (path->slots[*level] >=
2357 btrfs_header_nritems(cur))
2358 break;
2359
2360 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2361 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
707e8a07 2362 blocksize = root->nodesize;
e02119d5
CM
2363
2364 parent = path->nodes[*level];
2365 root_owner = btrfs_header_owner(parent);
e02119d5 2366
a83fffb7 2367 next = btrfs_find_create_tree_block(root, bytenr);
2a29edc6 2368 if (!next)
2369 return -ENOMEM;
e02119d5 2370
e02119d5 2371 if (*level == 1) {
1e5063d0 2372 ret = wc->process_func(root, next, wc, ptr_gen);
b50c6e25
JB
2373 if (ret) {
2374 free_extent_buffer(next);
1e5063d0 2375 return ret;
b50c6e25 2376 }
4a500fd1 2377
e02119d5
CM
2378 path->slots[*level]++;
2379 if (wc->free) {
018642a1
TI
2380 ret = btrfs_read_buffer(next, ptr_gen);
2381 if (ret) {
2382 free_extent_buffer(next);
2383 return ret;
2384 }
e02119d5 2385
681ae509
JB
2386 if (trans) {
2387 btrfs_tree_lock(next);
2388 btrfs_set_lock_blocking(next);
01d58472
DD
2389 clean_tree_block(trans, root->fs_info,
2390 next);
681ae509
JB
2391 btrfs_wait_tree_block_writeback(next);
2392 btrfs_tree_unlock(next);
2393 }
e02119d5 2394
e02119d5
CM
2395 WARN_ON(root_owner !=
2396 BTRFS_TREE_LOG_OBJECTID);
e688b725 2397 ret = btrfs_free_and_pin_reserved_extent(root,
d00aff00 2398 bytenr, blocksize);
3650860b
JB
2399 if (ret) {
2400 free_extent_buffer(next);
2401 return ret;
2402 }
e02119d5
CM
2403 }
2404 free_extent_buffer(next);
2405 continue;
2406 }
018642a1
TI
2407 ret = btrfs_read_buffer(next, ptr_gen);
2408 if (ret) {
2409 free_extent_buffer(next);
2410 return ret;
2411 }
e02119d5
CM
2412
2413 WARN_ON(*level <= 0);
2414 if (path->nodes[*level-1])
2415 free_extent_buffer(path->nodes[*level-1]);
2416 path->nodes[*level-1] = next;
2417 *level = btrfs_header_level(next);
2418 path->slots[*level] = 0;
2419 cond_resched();
2420 }
2421 WARN_ON(*level < 0);
2422 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2423
4a500fd1 2424 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
e02119d5
CM
2425
2426 cond_resched();
2427 return 0;
2428}
2429
d397712b 2430static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
e02119d5
CM
2431 struct btrfs_root *root,
2432 struct btrfs_path *path, int *level,
2433 struct walk_control *wc)
2434{
2435 u64 root_owner;
e02119d5
CM
2436 int i;
2437 int slot;
2438 int ret;
2439
d397712b 2440 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
e02119d5 2441 slot = path->slots[i];
4a500fd1 2442 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
e02119d5
CM
2443 path->slots[i]++;
2444 *level = i;
2445 WARN_ON(*level == 0);
2446 return 0;
2447 } else {
31840ae1
ZY
2448 struct extent_buffer *parent;
2449 if (path->nodes[*level] == root->node)
2450 parent = path->nodes[*level];
2451 else
2452 parent = path->nodes[*level + 1];
2453
2454 root_owner = btrfs_header_owner(parent);
1e5063d0 2455 ret = wc->process_func(root, path->nodes[*level], wc,
e02119d5 2456 btrfs_header_generation(path->nodes[*level]));
1e5063d0
MF
2457 if (ret)
2458 return ret;
2459
e02119d5
CM
2460 if (wc->free) {
2461 struct extent_buffer *next;
2462
2463 next = path->nodes[*level];
2464
681ae509
JB
2465 if (trans) {
2466 btrfs_tree_lock(next);
2467 btrfs_set_lock_blocking(next);
01d58472
DD
2468 clean_tree_block(trans, root->fs_info,
2469 next);
681ae509
JB
2470 btrfs_wait_tree_block_writeback(next);
2471 btrfs_tree_unlock(next);
2472 }
e02119d5 2473
e02119d5 2474 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
e688b725 2475 ret = btrfs_free_and_pin_reserved_extent(root,
e02119d5 2476 path->nodes[*level]->start,
d00aff00 2477 path->nodes[*level]->len);
3650860b
JB
2478 if (ret)
2479 return ret;
e02119d5
CM
2480 }
2481 free_extent_buffer(path->nodes[*level]);
2482 path->nodes[*level] = NULL;
2483 *level = i + 1;
2484 }
2485 }
2486 return 1;
2487}
2488
2489/*
2490 * drop the reference count on the tree rooted at 'snap'. This traverses
2491 * the tree freeing any blocks that have a ref count of zero after being
2492 * decremented.
2493 */
static int walk_log_tree(struct btrfs_trans_handle *trans,
			 struct btrfs_root *log, struct walk_control *wc)
{
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int orig_level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Anchor the walk at the log root; hold a ref while it's in the path. */
	level = btrfs_header_level(log->node);
	orig_level = level;
	path->nodes[level] = log->node;
	extent_buffer_get(log->node);
	path->slots[level] = 0;

	/* Alternate descending and ascending until the tree is exhausted. */
	while (1) {
		wret = walk_down_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}

		wret = walk_up_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}
	}

	/* was the root node processed? if not, catch it here */
	if (path->nodes[orig_level]) {
		ret = wc->process_func(log, path->nodes[orig_level], wc,
			 btrfs_header_generation(path->nodes[orig_level]));
		if (ret)
			goto out;
		if (wc->free) {
			struct extent_buffer *next;

			next = path->nodes[orig_level];

			/* No transaction during log recovery: skip cleaning. */
			if (trans) {
				btrfs_tree_lock(next);
				btrfs_set_lock_blocking(next);
				clean_tree_block(trans, log->fs_info, next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);
			}

			WARN_ON(log->root_key.objectid !=
				BTRFS_TREE_LOG_OBJECTID);
			ret = btrfs_free_and_pin_reserved_extent(log, next->start,
							 next->len);
			if (ret)
				goto out;
		}
	}

out:
	btrfs_free_path(path);
	return ret;
}
2563
7237f183
YZ
2564/*
2565 * helper function to update the item for a given subvolumes log root
2566 * in the tree of log roots
2567 */
2568static int update_log_root(struct btrfs_trans_handle *trans,
2569 struct btrfs_root *log)
2570{
2571 int ret;
2572
2573 if (log->log_transid == 1) {
2574 /* insert root item on the first sync */
2575 ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
2576 &log->root_key, &log->root_item);
2577 } else {
2578 ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
2579 &log->root_key, &log->root_item);
2580 }
2581 return ret;
2582}
2583
60d53eb3 2584static void wait_log_commit(struct btrfs_root *root, int transid)
e02119d5
CM
2585{
2586 DEFINE_WAIT(wait);
7237f183 2587 int index = transid % 2;
e02119d5 2588
7237f183
YZ
2589 /*
2590 * we only allow two pending log transactions at a time,
2591 * so we know that if ours is more than 2 older than the
2592 * current transaction, we're done
2593 */
e02119d5 2594 do {
7237f183
YZ
2595 prepare_to_wait(&root->log_commit_wait[index],
2596 &wait, TASK_UNINTERRUPTIBLE);
2597 mutex_unlock(&root->log_mutex);
12fcfd22 2598
d1433deb 2599 if (root->log_transid_committed < transid &&
7237f183
YZ
2600 atomic_read(&root->log_commit[index]))
2601 schedule();
12fcfd22 2602
7237f183
YZ
2603 finish_wait(&root->log_commit_wait[index], &wait);
2604 mutex_lock(&root->log_mutex);
d1433deb 2605 } while (root->log_transid_committed < transid &&
7237f183 2606 atomic_read(&root->log_commit[index]));
7237f183
YZ
2607}
2608
/*
 * Sleep until all current log writers of 'root' have finished. Called and
 * returns with root->log_mutex held; the mutex is dropped around schedule()
 * so writers can drop their count and wake us.
 */
static void wait_for_writer(struct btrfs_root *root)
{
	DEFINE_WAIT(wait);

	while (atomic_read(&root->log_writers)) {
		/* Queue ourselves before the final check to avoid a lost wakeup. */
		prepare_to_wait(&root->log_writer_wait,
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);
		if (atomic_read(&root->log_writers))
			schedule();
		finish_wait(&root->log_writer_wait, &wait);
		mutex_lock(&root->log_mutex);
	}
}
2623
8b050d35
MX
2624static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2625 struct btrfs_log_ctx *ctx)
2626{
2627 if (!ctx)
2628 return;
2629
2630 mutex_lock(&root->log_mutex);
2631 list_del_init(&ctx->list);
2632 mutex_unlock(&root->log_mutex);
2633}
2634
2635/*
2636 * Invoked in log mutex context, or be sure there is no other task which
2637 * can access the list.
2638 */
2639static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2640 int index, int error)
2641{
2642 struct btrfs_log_ctx *ctx;
2643
2644 if (!error) {
2645 INIT_LIST_HEAD(&root->log_ctxs[index]);
2646 return;
2647 }
2648
2649 list_for_each_entry(ctx, &root->log_ctxs[index], list)
2650 ctx->log_ret = error;
2651
2652 INIT_LIST_HEAD(&root->log_ctxs[index]);
2653}
2654
e02119d5
CM
2655/*
2656 * btrfs_sync_log does sends a given tree log down to the disk and
2657 * updates the super blocks to record it. When this call is done,
12fcfd22
CM
2658 * you know that any inodes previously logged are safely on disk only
2659 * if it returns 0.
2660 *
2661 * Any other return value means you need to call btrfs_commit_transaction.
2662 * Some of the edge cases for fsyncing directories that have had unlinks
2663 * or renames done in the past mean that sometimes the only safe
2664 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
2665 * that has happened.
e02119d5
CM
2666 */
int btrfs_sync_log(struct btrfs_trans_handle *trans,
		   struct btrfs_root *root, struct btrfs_log_ctx *ctx)
{
	int index1;
	int index2;
	int mark;
	int ret;
	struct btrfs_root *log = root->log_root;
	struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
	int log_transid = 0;
	struct btrfs_log_ctx root_log_ctx;
	struct blk_plug plug;

	mutex_lock(&root->log_mutex);
	log_transid = ctx->log_transid;
	/* Another task already committed our log transaction for us. */
	if (root->log_transid_committed >= log_transid) {
		mutex_unlock(&root->log_mutex);
		return ctx->log_ret;
	}

	index1 = log_transid % 2;
	/* Someone else is committing this transid: just wait for its result. */
	if (atomic_read(&root->log_commit[index1])) {
		wait_log_commit(root, log_transid);
		mutex_unlock(&root->log_mutex);
		return ctx->log_ret;
	}
	ASSERT(log_transid == root->log_transid);
	/* We are now the committer for this slot. */
	atomic_set(&root->log_commit[index1], 1);

	/* wait for previous tree log sync to complete */
	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
		wait_log_commit(root, log_transid - 1);

	/*
	 * Batch up concurrent fsyncs: keep waiting while new writers keep
	 * arriving (log_batch changes), so one commit covers them all.
	 */
	while (1) {
		int batch = atomic_read(&root->log_batch);
		/* when we're on an ssd, just kick the log commit out */
		if (!btrfs_test_opt(root, SSD) &&
		    test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
			mutex_unlock(&root->log_mutex);
			schedule_timeout_uninterruptible(1);
			mutex_lock(&root->log_mutex);
		}
		wait_for_writer(root);
		if (batch == atomic_read(&root->log_batch))
			break;
	}

	/* bail out if we need to do a full commit */
	if (btrfs_need_log_full_commit(root->fs_info, trans)) {
		ret = -EAGAIN;
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	/* Even/odd transids alternate between the two dirty-page markers. */
	if (log_transid % 2 == 0)
		mark = EXTENT_DIRTY;
	else
		mark = EXTENT_NEW;

	/* we start IO on all the marked extents here, but we don't actually
	 * wait for them until later.
	 */
	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
	if (ret) {
		blk_finish_plug(&plug);
		btrfs_abort_transaction(trans, root, ret);
		btrfs_free_logged_extents(log, log_transid);
		btrfs_set_log_full_commit(root->fs_info, trans);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	btrfs_set_root_node(&log->root_item, log->node);

	root->log_transid++;
	log->log_transid = root->log_transid;
	root->log_start_pid = 0;
	/*
	 * IO has been started, blocks of the log tree have WRITTEN flag set
	 * in their headers. new modifications of the log will be written to
	 * new positions. so it's safe to allow log writers to go in.
	 */
	mutex_unlock(&root->log_mutex);

	btrfs_init_log_ctx(&root_log_ctx);

	/* Phase two: commit the tree of log roots, same protocol as above. */
	mutex_lock(&log_root_tree->log_mutex);
	atomic_inc(&log_root_tree->log_batch);
	atomic_inc(&log_root_tree->log_writers);

	index2 = log_root_tree->log_transid % 2;
	list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
	root_log_ctx.log_transid = log_root_tree->log_transid;

	mutex_unlock(&log_root_tree->log_mutex);

	/* Record our updated log root in the tree of log roots. */
	ret = update_log_root(trans, log);

	mutex_lock(&log_root_tree->log_mutex);
	if (atomic_dec_and_test(&log_root_tree->log_writers)) {
		smp_mb();
		if (waitqueue_active(&log_root_tree->log_writer_wait))
			wake_up(&log_root_tree->log_writer_wait);
	}

	if (ret) {
		if (!list_empty(&root_log_ctx.list))
			list_del_init(&root_log_ctx.list);

		blk_finish_plug(&plug);
		btrfs_set_log_full_commit(root->fs_info, trans);

		/* -ENOSPC just forces a full commit; anything else is fatal. */
		if (ret != -ENOSPC) {
			btrfs_abort_transaction(trans, root, ret);
			mutex_unlock(&log_root_tree->log_mutex);
			goto out;
		}
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out;
	}

	/* Someone already committed the log-root-tree transid we joined. */
	if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
		blk_finish_plug(&plug);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = root_log_ctx.log_ret;
		goto out;
	}

	index2 = root_log_ctx.log_transid % 2;
	if (atomic_read(&log_root_tree->log_commit[index2])) {
		/*
		 * Another task is committing the log root tree; wait for our
		 * subvolume log IO ourselves, then wait for its result.
		 */
		blk_finish_plug(&plug);
		ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages,
						mark);
		btrfs_wait_logged_extents(trans, log, log_transid);
		wait_log_commit(log_root_tree,
				root_log_ctx.log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		if (!ret)
			ret = root_log_ctx.log_ret;
		goto out;
	}
	ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
	atomic_set(&log_root_tree->log_commit[index2], 1);

	if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
		wait_log_commit(log_root_tree,
				root_log_ctx.log_transid - 1);
	}

	wait_for_writer(log_root_tree);

	/*
	 * now that we've moved on to the tree of log tree roots,
	 * check the full commit flag again
	 */
	if (btrfs_need_log_full_commit(root->fs_info, trans)) {
		blk_finish_plug(&plug);
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out_wake_log_root;
	}

	ret = btrfs_write_marked_extents(log_root_tree,
					 &log_root_tree->dirty_log_pages,
					 EXTENT_DIRTY | EXTENT_NEW);
	blk_finish_plug(&plug);
	if (ret) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		btrfs_abort_transaction(trans, root, ret);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}
	/* All log tree IO must be stable before the super block points at it. */
	ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
	if (!ret)
		ret = btrfs_wait_marked_extents(log_root_tree,
						&log_root_tree->dirty_log_pages,
						EXTENT_NEW | EXTENT_DIRTY);
	if (ret) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}
	btrfs_wait_logged_extents(trans, log, log_transid);

	/* Point the super block copy at the new log root tree. */
	btrfs_set_super_log_root(root->fs_info->super_for_commit,
				 log_root_tree->node->start);
	btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
				       btrfs_header_level(log_root_tree->node));

	log_root_tree->log_transid++;
	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * nobody else is going to jump in and write the the ctree
	 * super here because the log_commit atomic below is protecting
	 * us. We must be called with a transaction handle pinning
	 * the running transaction open, so a full commit can't hop
	 * in and cause problems either.
	 */
	ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
	if (ret) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		btrfs_abort_transaction(trans, root, ret);
		goto out_wake_log_root;
	}

	mutex_lock(&root->log_mutex);
	if (root->last_log_commit < log_transid)
		root->last_log_commit = log_transid;
	mutex_unlock(&root->log_mutex);

out_wake_log_root:
	/*
	 * We needn't get log_mutex here because we are sure all
	 * the other tasks are blocked.
	 */
	btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);

	mutex_lock(&log_root_tree->log_mutex);
	log_root_tree->log_transid_committed++;
	atomic_set(&log_root_tree->log_commit[index2], 0);
	mutex_unlock(&log_root_tree->log_mutex);

	if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
		wake_up(&log_root_tree->log_commit_wait[index2]);
out:
	/* See above. */
	btrfs_remove_all_log_ctxs(root, index1, ret);

	mutex_lock(&root->log_mutex);
	root->log_transid_committed++;
	atomic_set(&root->log_commit[index1], 0);
	mutex_unlock(&root->log_mutex);

	if (waitqueue_active(&root->log_commit_wait[index1]))
		wake_up(&root->log_commit_wait[index1]);
	return ret;
}
2914
4a500fd1
YZ
/*
 * Tear down a log tree: walk it to release all its blocks, clear any
 * remaining dirty-page state and leftover logged extents, then free the
 * root structure itself. 'trans' may be NULL during log recovery.
 */
static void free_log_tree(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log)
{
	int ret;
	u64 start;
	u64 end;
	struct walk_control wc = {
		.free = 1,
		.process_func = process_one_buffer
	};

	ret = walk_log_tree(trans, log, &wc);
	/* I don't think this can happen but just in case */
	if (ret)
		btrfs_abort_transaction(trans, log, ret);

	/* Clear both markers; the log alternates between DIRTY and NEW. */
	while (1) {
		ret = find_first_extent_bit(&log->dirty_log_pages,
				0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
				NULL);
		if (ret)
			break;

		clear_extent_bits(&log->dirty_log_pages, start, end,
				  EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
	}

	/*
	 * We may have short-circuited the log tree with the full commit logic
	 * and left ordered extents on our list, so clear these out to keep us
	 * from leaking inodes and memory.
	 */
	btrfs_free_logged_extents(log, 0);
	btrfs_free_logged_extents(log, 1);

	free_extent_buffer(log->node);
	kfree(log);
}
2953
2954/*
2955 * free all the extents used by the tree log. This should be called
2956 * at commit time of the full transaction
2957 */
2958int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
2959{
2960 if (root->log_root) {
2961 free_log_tree(trans, root->log_root);
2962 root->log_root = NULL;
2963 }
2964 return 0;
2965}
2966
2967int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
2968 struct btrfs_fs_info *fs_info)
2969{
2970 if (fs_info->log_root_tree) {
2971 free_log_tree(trans, fs_info->log_root_tree);
2972 fs_info->log_root_tree = NULL;
2973 }
e02119d5
CM
2974 return 0;
2975}
2976
e02119d5
CM
2977/*
2978 * If both a file and directory are logged, and unlinks or renames are
2979 * mixed in, we have a few interesting corners:
2980 *
2981 * create file X in dir Y
2982 * link file X to X.link in dir Y
2983 * fsync file X
2984 * unlink file X but leave X.link
2985 * fsync dir Y
2986 *
2987 * After a crash we would expect only X.link to exist. But file X
2988 * didn't get fsync'd again so the log has back refs for X and X.link.
2989 *
2990 * We solve this by removing directory entries and inode backrefs from the
2991 * log when a file that was logged in the current transaction is
2992 * unlinked. Any later fsync will include the updated log entries, and
2993 * we'll be able to reconstruct the proper directory items from backrefs.
2994 *
2995 * This optimizations allows us to avoid relogging the entire inode
2996 * or the entire directory.
2997 */
int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 const char *name, int name_len,
				 struct inode *dir, u64 index)
{
	struct btrfs_root *log;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	int ret;
	int err = 0;
	int bytes_del = 0;
	u64 dir_ino = btrfs_ino(dir);

	/* If the dir was not logged in this transaction, nothing to remove. */
	if (BTRFS_I(dir)->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;

	mutex_lock(&BTRFS_I(dir)->log_mutex);

	log = root->log_root;
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_unlock;
	}

	/* Remove the name-keyed dir item from the log, if present. */
	di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		if (ret) {
			err = ret;
			goto fail;
		}
	}
	btrfs_release_path(path);
	/* And the index-keyed copy of the same entry. */
	di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
					 index, name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		if (ret) {
			err = ret;
			goto fail;
		}
	}

	/* update the directory size in the log to reflect the names
	 * we have removed
	 */
	if (bytes_del) {
		struct btrfs_key key;

		key.objectid = dir_ino;
		key.offset = 0;
		key.type = BTRFS_INODE_ITEM_KEY;
		btrfs_release_path(path);

		ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (ret == 0) {
			struct btrfs_inode_item *item;
			u64 i_size;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			i_size = btrfs_inode_size(path->nodes[0], item);
			/* Clamp at zero rather than underflowing the size. */
			if (i_size > bytes_del)
				i_size -= bytes_del;
			else
				i_size = 0;
			btrfs_set_inode_size(path->nodes[0], item, i_size);
			btrfs_mark_buffer_dirty(path->nodes[0]);
		} else
			ret = 0;
		btrfs_release_path(path);
	}
fail:
	btrfs_free_path(path);
out_unlock:
	mutex_unlock(&BTRFS_I(dir)->log_mutex);
	/*
	 * NOTE(review): the -ENOSPC/abort checks below test 'ret' while the
	 * fail paths store their error in 'err' — some failures may bypass
	 * this handling. Looks intentional-era behavior; confirm upstream.
	 */
	if (ret == -ENOSPC) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		ret = 0;
	} else if (ret < 0)
		btrfs_abort_transaction(trans, root, ret);

	btrfs_end_log_trans(root);

	return err;
}
3104
3105/* see comments for btrfs_del_dir_entries_in_log */
3106int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3107 struct btrfs_root *root,
3108 const char *name, int name_len,
3109 struct inode *inode, u64 dirid)
3110{
3111 struct btrfs_root *log;
3112 u64 index;
3113 int ret;
3114
3a5f1d45
CM
3115 if (BTRFS_I(inode)->logged_trans < trans->transid)
3116 return 0;
3117
e02119d5
CM
3118 ret = join_running_log_trans(root);
3119 if (ret)
3120 return 0;
3121 log = root->log_root;
3122 mutex_lock(&BTRFS_I(inode)->log_mutex);
3123
33345d01 3124 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
e02119d5
CM
3125 dirid, &index);
3126 mutex_unlock(&BTRFS_I(inode)->log_mutex);
4a500fd1 3127 if (ret == -ENOSPC) {
995946dd 3128 btrfs_set_log_full_commit(root->fs_info, trans);
4a500fd1 3129 ret = 0;
79787eaa
JM
3130 } else if (ret < 0 && ret != -ENOENT)
3131 btrfs_abort_transaction(trans, root, ret);
12fcfd22 3132 btrfs_end_log_trans(root);
e02119d5 3133
e02119d5
CM
3134 return ret;
3135}
3136
3137/*
3138 * creates a range item in the log for 'dirid'. first_offset and
3139 * last_offset tell us which parts of the key space the log should
3140 * be considered authoritative for.
3141 */
3142static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3143 struct btrfs_root *log,
3144 struct btrfs_path *path,
3145 int key_type, u64 dirid,
3146 u64 first_offset, u64 last_offset)
3147{
3148 int ret;
3149 struct btrfs_key key;
3150 struct btrfs_dir_log_item *item;
3151
3152 key.objectid = dirid;
3153 key.offset = first_offset;
3154 if (key_type == BTRFS_DIR_ITEM_KEY)
3155 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3156 else
3157 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3158 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
4a500fd1
YZ
3159 if (ret)
3160 return ret;
e02119d5
CM
3161
3162 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3163 struct btrfs_dir_log_item);
3164 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3165 btrfs_mark_buffer_dirty(path->nodes[0]);
b3b4aa74 3166 btrfs_release_path(path);
e02119d5
CM
3167 return 0;
3168}
3169
/*
 * log all the items included in the current transaction for a given
 * directory. This also creates the range items in the log tree required
 * to replay anything deleted before the fsync
 *
 * Logs items of @key_type (DIR_ITEM or DIR_INDEX) starting at
 * @min_offset.  On success *last_offset_ret holds the end of the range
 * that was made authoritative in the log ((u64)-1 when the whole tail of
 * the directory was covered), so the caller can continue from there.
 */
static noinline int log_dir_items(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path, int key_type,
			  struct btrfs_log_ctx *ctx,
			  u64 min_offset, u64 *last_offset_ret)
{
	struct btrfs_key min_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src;
	int err = 0;
	int ret;
	int i;
	int nritems;
	u64 first_offset = min_offset;
	u64 last_offset = (u64)-1;
	u64 ino = btrfs_ino(inode);

	log = root->log_root;

	min_key.objectid = ino;
	min_key.type = key_type;
	min_key.offset = min_offset;

	/* find the first key of this type modified in this transaction */
	ret = btrfs_search_forward(root, &min_key, path, trans->transid);

	/*
	 * we didn't find anything from this transaction, see if there
	 * is anything at all
	 */
	if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
		min_key.objectid = ino;
		min_key.type = key_type;
		min_key.offset = (u64)-1;
		btrfs_release_path(path);
		ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
		if (ret < 0) {
			btrfs_release_path(path);
			return ret;
		}
		ret = btrfs_previous_item(root, path, ino, key_type);

		/* if ret == 0 there are items for this type,
		 * create a range to tell us the last key of this type.
		 * otherwise, there are no items in this directory after
		 * *min_offset, and we create a range to indicate that.
		 */
		if (ret == 0) {
			struct btrfs_key tmp;
			btrfs_item_key_to_cpu(path->nodes[0], &tmp,
					      path->slots[0]);
			if (key_type == tmp.type)
				first_offset = max(min_offset, tmp.offset) + 1;
		}
		goto done;
	}

	/* go backward to find any previous key */
	ret = btrfs_previous_item(root, path, ino, key_type);
	if (ret == 0) {
		struct btrfs_key tmp;
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (key_type == tmp.type) {
			/*
			 * anchor the logged range at the previous existing
			 * key and copy that item into the log as well
			 */
			first_offset = tmp.offset;
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret) {
				err = ret;
				goto done;
			}
		}
	}
	btrfs_release_path(path);

	/* find the first key from this transaction again */
	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
	if (WARN_ON(ret != 0))
		goto done;

	/*
	 * we have a block from this transaction, log every item in it
	 * from our directory
	 */
	while (1) {
		struct btrfs_key tmp;
		src = path->nodes[0];
		nritems = btrfs_header_nritems(src);
		for (i = path->slots[0]; i < nritems; i++) {
			struct btrfs_dir_item *di;

			btrfs_item_key_to_cpu(src, &min_key, i);

			if (min_key.objectid != ino || min_key.type != key_type)
				goto done;
			ret = overwrite_item(trans, log, dst_path, src, i,
					     &min_key);
			if (ret) {
				err = ret;
				goto done;
			}

			/*
			 * We must make sure that when we log a directory entry,
			 * the corresponding inode, after log replay, has a
			 * matching link count. For example:
			 *
			 * touch foo
			 * mkdir mydir
			 * sync
			 * ln foo mydir/bar
			 * xfs_io -c "fsync" mydir
			 * <crash>
			 * <mount fs and log replay>
			 *
			 * Would result in a fsync log that when replayed, our
			 * file inode would have a link count of 1, but we get
			 * two directory entries pointing to the same inode.
			 * After removing one of the names, it would not be
			 * possible to remove the other name, which resulted
			 * always in stale file handle errors, and would not
			 * be possible to rmdir the parent directory, since
			 * its i_size could never decrement to the value
			 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
			 */
			di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
			btrfs_dir_item_key_to_cpu(src, di, &tmp);
			if (ctx &&
			    (btrfs_dir_transid(src, di) == trans->transid ||
			     btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
			    tmp.type != BTRFS_ROOT_ITEM_KEY)
				ctx->log_new_dentries = true;
		}
		path->slots[0] = nritems;

		/*
		 * look ahead to the next item and see if it is also
		 * from this directory and from this transaction
		 */
		ret = btrfs_next_leaf(root, path);
		if (ret == 1) {
			last_offset = (u64)-1;
			goto done;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (tmp.objectid != ino || tmp.type != key_type) {
			last_offset = (u64)-1;
			goto done;
		}
		if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
			/*
			 * next leaf is from an older transaction; log its
			 * first item as the boundary and stop here
			 */
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret)
				err = ret;
			else
				last_offset = tmp.offset;
			goto done;
		}
	}
done:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);

	if (err == 0) {
		*last_offset_ret = last_offset;
		/*
		 * insert the log range keys to indicate where the log
		 * is valid
		 */
		ret = insert_dir_log_key(trans, log, path, key_type,
					 ino, first_offset, last_offset);
		if (ret)
			err = ret;
	}
	return err;
}
3352
3353/*
3354 * logging directories is very similar to logging inodes, We find all the items
3355 * from the current transaction and write them to the log.
3356 *
3357 * The recovery code scans the directory in the subvolume, and if it finds a
3358 * key in the range logged that is not present in the log tree, then it means
3359 * that dir entry was unlinked during the transaction.
3360 *
3361 * In order for that scan to work, we must include one key smaller than
3362 * the smallest logged by this transaction and one key larger than the largest
3363 * key logged by this transaction.
3364 */
3365static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3366 struct btrfs_root *root, struct inode *inode,
3367 struct btrfs_path *path,
2f2ff0ee
FM
3368 struct btrfs_path *dst_path,
3369 struct btrfs_log_ctx *ctx)
e02119d5
CM
3370{
3371 u64 min_key;
3372 u64 max_key;
3373 int ret;
3374 int key_type = BTRFS_DIR_ITEM_KEY;
3375
3376again:
3377 min_key = 0;
3378 max_key = 0;
d397712b 3379 while (1) {
e02119d5 3380 ret = log_dir_items(trans, root, inode, path,
2f2ff0ee 3381 dst_path, key_type, ctx, min_key,
e02119d5 3382 &max_key);
4a500fd1
YZ
3383 if (ret)
3384 return ret;
e02119d5
CM
3385 if (max_key == (u64)-1)
3386 break;
3387 min_key = max_key + 1;
3388 }
3389
3390 if (key_type == BTRFS_DIR_ITEM_KEY) {
3391 key_type = BTRFS_DIR_INDEX_KEY;
3392 goto again;
3393 }
3394 return 0;
3395}
3396
/*
 * a helper function to drop items from the log before we relog an
 * inode.  max_key_type indicates the highest item type to remove.
 * This cannot be run for file data extents because it does not
 * free the extents they point to.
 */
static int drop_objectid_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log,
			       struct btrfs_path *path,
			       u64 objectid, int max_key_type)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int start_slot;

	/* start at the largest possible key for this objectid/type */
	key.objectid = objectid;
	key.type = max_key_type;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
		/* offset (u64)-1 can never be an existing key */
		BUG_ON(ret == 0); /* Logic error */
		if (ret < 0)
			break;

		/* nothing before the insertion point in this leaf */
		if (path->slots[0] == 0)
			break;

		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);

		if (found_key.objectid != objectid)
			break;

		/*
		 * find the smallest slot in this leaf that still belongs to
		 * the objectid, so the whole run can be deleted in one call
		 */
		found_key.offset = 0;
		found_key.type = 0;
		ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
				       &start_slot);

		ret = btrfs_del_items(trans, log, path, start_slot,
				      path->slots[0] - start_slot + 1);
		/*
		 * If start slot isn't 0 then we don't need to re-search, we've
		 * found the last guy with the objectid in this tree.
		 */
		if (ret || start_slot != 0)
			break;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);
	if (ret > 0)
		ret = 0;
	return ret;
}
3453
94edf4ae
JB
/*
 * Fill a log-tree inode item from the in-memory inode.  A map token is
 * used so repeated set_token_* calls can reuse the cached extent buffer
 * mapping.  When @log_inode_only is set, only enough is recorded for
 * replay to know the inode exists (generation 0, @logged_isize).
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode, int log_inode_only,
			    u64 logged_isize)
{
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	if (log_inode_only) {
		/* set the generation to zero so the recover code
		 * can tell the difference between a logging
		 * just to say 'this inode exists' and a logging
		 * to say 'update this inode with these values'
		 */
		btrfs_set_token_inode_generation(leaf, item, 0, &token);
		btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
	} else {
		btrfs_set_token_inode_generation(leaf, item,
						 BTRFS_I(inode)->generation,
						 &token);
		btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
	}

	/* ownership, mode and link count come straight from the VFS inode */
	btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
	btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
	btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
	btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);

	btrfs_set_token_timespec_sec(leaf, &item->atime,
				     inode->i_atime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, &item->atime,
				      inode->i_atime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, &item->mtime,
				     inode->i_mtime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, &item->mtime,
				      inode->i_mtime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, &item->ctime,
				     inode->i_ctime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, &item->ctime,
				      inode->i_ctime.tv_nsec, &token);

	btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
				     &token);

	btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
	/* record the transaction this item was written in */
	btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
	btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
	btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
	btrfs_set_token_inode_block_group(leaf, item, 0, &token);
}
3508
a95249b3
JB
3509static int log_inode_item(struct btrfs_trans_handle *trans,
3510 struct btrfs_root *log, struct btrfs_path *path,
3511 struct inode *inode)
3512{
3513 struct btrfs_inode_item *inode_item;
a95249b3
JB
3514 int ret;
3515
efd0c405
FDBM
3516 ret = btrfs_insert_empty_item(trans, log, path,
3517 &BTRFS_I(inode)->location,
a95249b3
JB
3518 sizeof(*inode_item));
3519 if (ret && ret != -EEXIST)
3520 return ret;
3521 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3522 struct btrfs_inode_item);
1a4bcf47 3523 fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0);
a95249b3
JB
3524 btrfs_release_path(path);
3525 return 0;
3526}
3527
/*
 * Copy @nr items starting at @start_slot from the leaf in @src_path into
 * the log tree.  Inode items are refilled from the in-memory inode,
 * checksums for regular (non-hole, current-transaction) file extents are
 * collected and logged, and afterwards holes between the logged extents
 * are punched into the log.  *last_extent tracks the end offset of the
 * last extent logged so far across calls.
 *
 * Returns < 0 on error, 0 on success, or 1 when the source path was
 * dropped and the caller must re-search.
 */
static noinline int copy_items(struct btrfs_trans_handle *trans,
			       struct inode *inode,
			       struct btrfs_path *dst_path,
			       struct btrfs_path *src_path, u64 *last_extent,
			       int start_slot, int nr, int inode_only,
			       u64 logged_isize)
{
	unsigned long src_offset;
	unsigned long dst_offset;
	struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
	struct btrfs_file_extent_item *extent;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *src = src_path->nodes[0];
	struct btrfs_key first_key, last_key, key;
	int ret;
	struct btrfs_key *ins_keys;
	u32 *ins_sizes;
	char *ins_data;
	int i;
	struct list_head ordered_sums;
	int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
	bool has_extents = false;
	bool need_find_last_extent = true;
	bool done = false;

	INIT_LIST_HEAD(&ordered_sums);

	/* one allocation holding both the size and key arrays */
	ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
			   nr * sizeof(u32), GFP_NOFS);
	if (!ins_data)
		return -ENOMEM;

	/* (u64)-1 marks "no extent key seen yet" */
	first_key.objectid = (u64)-1;

	ins_sizes = (u32 *)ins_data;
	ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));

	for (i = 0; i < nr; i++) {
		ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
		btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
	}
	/* reserve all destination items in the log leaf in one go */
	ret = btrfs_insert_empty_items(trans, log, dst_path,
				       ins_keys, ins_sizes, nr);
	if (ret) {
		kfree(ins_data);
		return ret;
	}

	for (i = 0; i < nr; i++, dst_path->slots[0]++) {
		dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
						   dst_path->slots[0]);

		src_offset = btrfs_item_ptr_offset(src, start_slot + i);

		if ((i == (nr - 1)))
			last_key = ins_keys[i];

		if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
			/* inode items are rebuilt from the live inode
			 * rather than copied byte for byte */
			inode_item = btrfs_item_ptr(dst_path->nodes[0],
						    dst_path->slots[0],
						    struct btrfs_inode_item);
			fill_inode_item(trans, dst_path->nodes[0], inode_item,
					inode, inode_only == LOG_INODE_EXISTS,
					logged_isize);
		} else {
			copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
					   src_offset, ins_sizes[i]);
		}

		/*
		 * We set need_find_last_extent here in case we know we were
		 * processing other items and then walk into the first extent in
		 * the inode.  If we don't hit an extent then nothing changes,
		 * we'll do the last search the next time around.
		 */
		if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
			has_extents = true;
			if (first_key.objectid == (u64)-1)
				first_key = ins_keys[i];
		} else {
			need_find_last_extent = false;
		}

		/* take a reference on file data extents so that truncates
		 * or deletes of this inode don't have to relog the inode
		 * again
		 */
		if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
		    !skip_csum) {
			int found_type;
			extent = btrfs_item_ptr(src, start_slot + i,
						struct btrfs_file_extent_item);

			/* csums of extents from older transactions are
			 * already safely on disk */
			if (btrfs_file_extent_generation(src, extent) < trans->transid)
				continue;

			found_type = btrfs_file_extent_type(src, extent);
			if (found_type == BTRFS_FILE_EXTENT_REG) {
				u64 ds, dl, cs, cl;
				ds = btrfs_file_extent_disk_bytenr(src,
								extent);
				/* ds == 0 is a hole */
				if (ds == 0)
					continue;

				dl = btrfs_file_extent_disk_num_bytes(src,
								extent);
				cs = btrfs_file_extent_offset(src, extent);
				cl = btrfs_file_extent_num_bytes(src,
								extent);
				/* compressed extents are checksummed over
				 * the whole on-disk extent */
				if (btrfs_file_extent_compression(src,
								  extent)) {
					cs = 0;
					cl = dl;
				}

				ret = btrfs_lookup_csums_range(
						log->fs_info->csum_root,
						ds + cs, ds + cs + cl - 1,
						&ordered_sums, 0);
				if (ret) {
					btrfs_release_path(dst_path);
					kfree(ins_data);
					return ret;
				}
			}
		}
	}

	btrfs_mark_buffer_dirty(dst_path->nodes[0]);
	btrfs_release_path(dst_path);
	kfree(ins_data);

	/*
	 * we have to do this after the loop above to avoid changing the
	 * log tree while trying to change the log tree.
	 */
	ret = 0;
	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						   struct btrfs_ordered_sum,
						   list);
		/* keep draining (and freeing) the list even after an error */
		if (!ret)
			ret = btrfs_csum_file_blocks(trans, log, sums);
		list_del(&sums->list);
		kfree(sums);
	}

	if (!has_extents)
		return ret;

	if (need_find_last_extent && *last_extent == first_key.offset) {
		/*
		 * We don't have any leafs between our current one and the one
		 * we processed before that can have file extent items for our
		 * inode (and have a generation number smaller than our current
		 * transaction id).
		 */
		need_find_last_extent = false;
	}

	/*
	 * Because we use btrfs_search_forward we could skip leaves that were
	 * not modified and then assume *last_extent is valid when it really
	 * isn't.  So back up to the previous leaf and read the end of the last
	 * extent before we go and fill in holes.
	 */
	if (need_find_last_extent) {
		u64 len;

		ret = btrfs_prev_leaf(BTRFS_I(inode)->root, src_path);
		if (ret < 0)
			return ret;
		if (ret)
			goto fill_holes;
		if (src_path->slots[0])
			src_path->slots[0]--;
		src = src_path->nodes[0];
		btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
		if (key.objectid != btrfs_ino(inode) ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			goto fill_holes;
		extent = btrfs_item_ptr(src, src_path->slots[0],
					struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(src, extent) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			/* inline extents end on a sector boundary */
			len = btrfs_file_extent_inline_len(src,
							   src_path->slots[0],
							   extent);
			*last_extent = ALIGN(key.offset + len,
					     log->sectorsize);
		} else {
			len = btrfs_file_extent_num_bytes(src, extent);
			*last_extent = key.offset + len;
		}
	}
fill_holes:
	/* So we did prev_leaf, now we need to move to the next leaf, but a few
	 * things could have happened
	 *
	 * 1) A merge could have happened, so we could currently be on a leaf
	 * that holds what we were copying in the first place.
	 * 2) A split could have happened, and now not all of the items we want
	 * are on the same leaf.
	 *
	 * So we need to adjust how we search for holes, we need to drop the
	 * path and re-search for the first extent key we found, and then walk
	 * forward until we hit the last one we copied.
	 */
	if (need_find_last_extent) {
		/* btrfs_prev_leaf could return 1 without releasing the path */
		btrfs_release_path(src_path);
		ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &first_key,
					src_path, 0, 0);
		if (ret < 0)
			return ret;
		ASSERT(ret == 0);
		src = src_path->nodes[0];
		i = src_path->slots[0];
	} else {
		i = start_slot;
	}

	/*
	 * Ok so here we need to go through and fill in any holes we may have
	 * to make sure that holes are punched for those areas in case they had
	 * extents previously.
	 */
	while (!done) {
		u64 offset, len;
		u64 extent_end;

		if (i >= btrfs_header_nritems(src_path->nodes[0])) {
			ret = btrfs_next_leaf(BTRFS_I(inode)->root, src_path);
			if (ret < 0)
				return ret;
			ASSERT(ret == 0);
			src = src_path->nodes[0];
			i = 0;
		}

		btrfs_item_key_to_cpu(src, &key, i);
		/* stop after processing the last item we copied */
		if (!btrfs_comp_cpu_keys(&key, &last_key))
			done = true;
		if (key.objectid != btrfs_ino(inode) ||
		    key.type != BTRFS_EXTENT_DATA_KEY) {
			i++;
			continue;
		}
		extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(src, extent) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			len = btrfs_file_extent_inline_len(src, i, extent);
			extent_end = ALIGN(key.offset + len, log->sectorsize);
		} else {
			len = btrfs_file_extent_num_bytes(src, extent);
			extent_end = key.offset + len;
		}
		i++;

		/* contiguous with the previous extent: no hole to punch */
		if (*last_extent == key.offset) {
			*last_extent = extent_end;
			continue;
		}
		/* insert an explicit hole covering [*last_extent, key.offset) */
		offset = *last_extent;
		len = key.offset - *last_extent;
		ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
					       offset, 0, 0, len, 0, len, 0,
					       0, 0);
		if (ret)
			break;
		*last_extent = extent_end;
	}
	/*
	 * Need to let the callers know we dropped the path so they should
	 * re-search.
	 */
	if (!ret && need_find_last_extent)
		ret = 1;
	return ret;
}
3809
5dc562c5
JB
3810static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
3811{
3812 struct extent_map *em1, *em2;
3813
3814 em1 = list_entry(a, struct extent_map, list);
3815 em2 = list_entry(b, struct extent_map, list);
3816
3817 if (em1->start < em2->start)
3818 return -1;
3819 else if (em1->start > em2->start)
3820 return 1;
3821 return 0;
3822}
3823
8407f553
FM
/*
 * Wait for the ordered I/O covering the extent map @em to complete and
 * copy the matching data checksums into the log tree.  On I/O failure
 * *ordered_io_error is set (and 0 returned) so the caller can record the
 * error without aborting.  Preallocated extents and holes need no
 * waiting or csums and return immediately.
 */
static int wait_ordered_extents(struct btrfs_trans_handle *trans,
				struct inode *inode,
				struct btrfs_root *root,
				const struct extent_map *em,
				const struct list_head *logged_list,
				bool *ordered_io_error)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *log = root->log_root;
	u64 mod_start = em->mod_start;
	u64 mod_len = em->mod_len;
	const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
	u64 csum_offset;
	u64 csum_len;
	LIST_HEAD(ordered_sums);
	int ret = 0;

	*ordered_io_error = false;

	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    em->block_start == EXTENT_MAP_HOLE)
		return 0;

	/*
	 * Wait for any ordered extent that covers our extent map. If it
	 * finishes without an error, first check and see if our csums are on
	 * our outstanding ordered extents.
	 */
	list_for_each_entry(ordered, logged_list, log_list) {
		struct btrfs_ordered_sum *sum;

		/* the whole modified range has been accounted for */
		if (!mod_len)
			break;

		/* skip ordered extents that don't overlap the modified range */
		if (ordered->file_offset + ordered->len <= mod_start ||
		    mod_start + mod_len <= ordered->file_offset)
			continue;

		/* kick off writeback if the ordered extent hasn't started IO */
		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
			const u64 start = ordered->file_offset;
			const u64 end = ordered->file_offset + ordered->len - 1;

			WARN_ON(ordered->inode != inode);
			filemap_fdatawrite_range(inode->i_mapping, start, end);
		}

		wait_event(ordered->wait,
			   (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) ||
			    test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));

		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
			/*
			 * Clear the AS_EIO/AS_ENOSPC flags from the inode's
			 * i_mapping flags, so that the next fsync won't get
			 * an outdated io error too.
			 */
			btrfs_inode_check_errors(inode);
			*ordered_io_error = true;
			break;
		}
		/*
		 * We are going to copy all the csums on this ordered extent, so
		 * go ahead and adjust mod_start and mod_len in case this
		 * ordered extent has already been logged.
		 */
		if (ordered->file_offset > mod_start) {
			if (ordered->file_offset + ordered->len >=
			    mod_start + mod_len)
				mod_len = ordered->file_offset - mod_start;
			/*
			 * If we have this case
			 *
			 * |--------- logged extent ---------|
			 *       |----- ordered extent ----|
			 *
			 * Just don't mess with mod_start and mod_len, we'll
			 * just end up logging more csums than we need and it
			 * will be ok.
			 */
		} else {
			if (ordered->file_offset + ordered->len <
			    mod_start + mod_len) {
				mod_len = (mod_start + mod_len) -
					(ordered->file_offset + ordered->len);
				mod_start = ordered->file_offset +
					ordered->len;
			} else {
				mod_len = 0;
			}
		}

		if (skip_csum)
			continue;

		/*
		 * To keep us from looping for the above case of an ordered
		 * extent that falls inside of the logged extent.
		 */
		if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
				     &ordered->flags))
			continue;

		/* copy this ordered extent's csums into the log tree */
		list_for_each_entry(sum, &ordered->list, list) {
			ret = btrfs_csum_file_blocks(trans, log, sum);
			if (ret)
				break;
		}
	}

	if (*ordered_io_error || !mod_len || ret || skip_csum)
		return ret;

	/*
	 * Any part of the modified range not covered by ordered extents
	 * above still needs its csums looked up from the csum tree.
	 */
	if (em->compress_type) {
		/* compressed extents are checksummed over the whole
		 * on-disk extent */
		csum_offset = 0;
		csum_len = max(em->block_len, em->orig_block_len);
	} else {
		csum_offset = mod_start - em->start;
		csum_len = mod_len;
	}

	/* block start is already adjusted for the file extent offset. */
	ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
				       em->block_start + csum_offset,
				       em->block_start + csum_offset +
				       csum_len - 1, &ordered_sums, 0);
	if (ret)
		return ret;

	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
		/* keep draining (and freeing) the list even after an error */
		if (!ret)
			ret = btrfs_csum_file_blocks(trans, log, sums);
		list_del(&sums->list);
		kfree(sums);
	}

	return ret;
}
3966
8407f553
FM
/*
 * Log a single extent map for @inode: wait for its ordered I/O (logging
 * csums along the way), drop any overlapping file extent items already in
 * the log, then write a fresh file extent item describing @em.  An
 * ordered I/O error is recorded in ctx->io_err and 0 is returned so the
 * caller decides how to react.
 */
static int log_one_extent(struct btrfs_trans_handle *trans,
			  struct inode *inode, struct btrfs_root *root,
			  const struct extent_map *em,
			  struct btrfs_path *path,
			  const struct list_head *logged_list,
			  struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *log = root->log_root;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	struct btrfs_map_token token;
	struct btrfs_key key;
	u64 extent_offset = em->start - em->orig_start;
	u64 block_len;
	int ret;
	int extent_inserted = 0;
	bool ordered_io_err = false;

	ret = wait_ordered_extents(trans, inode, root, em, logged_list,
				   &ordered_io_err);
	if (ret)
		return ret;

	if (ordered_io_err) {
		/* remember the IO error for the fsync caller, but keep going */
		ctx->io_err = -EIO;
		return 0;
	}

	btrfs_init_map_token(&token);

	/*
	 * remove any logged extents overlapping [em->start, em->start+len);
	 * this may leave an empty item slot behind (extent_inserted).
	 */
	ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
				   em->start + em->len, NULL, 0, 1,
				   sizeof(*fi), &extent_inserted);
	if (ret)
		return ret;

	if (!extent_inserted) {
		key.objectid = btrfs_ino(inode);
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = em->start;

		ret = btrfs_insert_empty_item(trans, log, path, &key,
					      sizeof(*fi));
		if (ret)
			return ret;
	}
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
					       &token);
	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		btrfs_set_token_file_extent_type(leaf, fi,
						 BTRFS_FILE_EXTENT_PREALLOC,
						 &token);
	else
		btrfs_set_token_file_extent_type(leaf, fi,
						 BTRFS_FILE_EXTENT_REG,
						 &token);

	block_len = max(em->block_len, em->orig_block_len);
	if (em->compress_type != BTRFS_COMPRESS_NONE) {
		/* compressed: disk bytenr points at the compressed extent */
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
							em->block_start,
							&token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
							   &token);
	} else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
							em->block_start -
							extent_offset, &token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
							   &token);
	} else {
		/* hole/inline style mapping: no disk extent backing it */
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
							   &token);
	}

	btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
	btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
	btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
	btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
						&token);
	btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
	btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	return ret;
}
4060
5dc562c5
JB
/*
 * Log all extent maps on the inode's modified_extents list that were changed
 * in the current transaction (generation newer than the last committed one).
 *
 * The modified list is drained under tree->lock onto a private list, sorted
 * with extent_cmp, and each entry is logged via log_one_extent() with the
 * lock dropped around the actual logging.  If there are too many extents
 * (arbitrary cap of 32768) -EFBIG is returned so the caller falls back to a
 * full transaction commit.
 *
 * Returns 0 on success or a negative errno.  On error, remaining private
 * list entries are still unflagged and released.
 */
static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct inode *inode,
				     struct btrfs_path *path,
				     struct list_head *logged_list,
				     struct btrfs_log_ctx *ctx)
{
	struct extent_map *em, *n;
	struct list_head extents;
	struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
	u64 test_gen;
	int ret = 0;
	int num = 0;

	INIT_LIST_HEAD(&extents);

	write_lock(&tree->lock);
	test_gen = root->fs_info->last_trans_committed;

	list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
		list_del_init(&em->list);

		/*
		 * Just an arbitrary number, this can be really CPU intensive
		 * once we start getting a lot of extents, and really once we
		 * have a bunch of extents we just want to commit since it will
		 * be faster.
		 */
		if (++num > 32768) {
			list_del_init(&tree->modified_extents);
			ret = -EFBIG;
			goto process;
		}

		if (em->generation <= test_gen)
			continue;
		/* Need a ref to keep it from getting evicted from cache */
		atomic_inc(&em->refs);
		set_bit(EXTENT_FLAG_LOGGING, &em->flags);
		list_add_tail(&em->list, &extents);
		/*
		 * NOTE(review): num was already incremented above for this
		 * entry, so extents that get queued count twice towards the
		 * 32768 cap — presumably intentional weighting; verify
		 * against upstream history before changing.
		 */
		num++;
	}

	list_sort(NULL, &extents, extent_cmp);

process:
	while (!list_empty(&extents)) {
		em = list_entry(extents.next, struct extent_map, list);

		list_del_init(&em->list);

		/*
		 * If we had an error we just need to delete everybody from our
		 * private list.
		 */
		if (ret) {
			clear_em_logging(tree, em);
			free_extent_map(em);
			continue;
		}

		/* Drop the tree lock while doing the (blocking) logging. */
		write_unlock(&tree->lock);

		ret = log_one_extent(trans, inode, root, em, path, logged_list,
				     ctx);
		write_lock(&tree->lock);
		clear_em_logging(tree, em);
		free_extent_map(em);	/* drop the ref taken above */
	}
	WARN_ON(!list_empty(&extents));
	write_unlock(&tree->lock);

	btrfs_release_path(path);
	return ret;
}
4136
1a4bcf47
FM
4137static int logged_inode_size(struct btrfs_root *log, struct inode *inode,
4138 struct btrfs_path *path, u64 *size_ret)
4139{
4140 struct btrfs_key key;
4141 int ret;
4142
4143 key.objectid = btrfs_ino(inode);
4144 key.type = BTRFS_INODE_ITEM_KEY;
4145 key.offset = 0;
4146
4147 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4148 if (ret < 0) {
4149 return ret;
4150 } else if (ret > 0) {
2f2ff0ee 4151 *size_ret = 0;
1a4bcf47
FM
4152 } else {
4153 struct btrfs_inode_item *item;
4154
4155 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4156 struct btrfs_inode_item);
4157 *size_ret = btrfs_inode_size(path->nodes[0], item);
4158 }
4159
4160 btrfs_release_path(path);
4161 return 0;
4162}
4163
36283bf7
FM
4164/*
4165 * At the moment we always log all xattrs. This is to figure out at log replay
4166 * time which xattrs must have their deletion replayed. If a xattr is missing
4167 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4168 * because if a xattr is deleted, the inode is fsynced and a power failure
4169 * happens, causing the log to be replayed the next time the fs is mounted,
4170 * we want the xattr to not exist anymore (same behaviour as other filesystems
4171 * with a journal, ext3/4, xfs, f2fs, etc).
4172 */
static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *inode,
				struct btrfs_path *path,
				struct btrfs_path *dst_path)
{
	int ret;
	struct btrfs_key key;
	const u64 ino = btrfs_ino(inode);
	int ins_nr = 0;		/* number of contiguous slots queued for copy */
	int start_slot = 0;	/* first slot of the queued run */

	/* Start at the first possible xattr item for this inode. */
	key.objectid = ino;
	key.type = BTRFS_XATTR_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (true) {
		int slot = path->slots[0];
		struct extent_buffer *leaf = path->nodes[0];
		int nritems = btrfs_header_nritems(leaf);

		if (slot >= nritems) {
			/* Flush the queued run before leaving this leaf. */
			if (ins_nr > 0) {
				u64 last_extent = 0;

				ret = copy_items(trans, inode, dst_path, path,
						 &last_extent, start_slot,
						 ins_nr, 1, 0);
				/* can't be 1, extent items aren't processed */
				ASSERT(ret <= 0);
				if (ret < 0)
					return ret;
				ins_nr = 0;
			}
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		/* Stop at the first item past this inode's xattrs. */
		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
			break;

		if (ins_nr == 0)
			start_slot = slot;
		ins_nr++;
		path->slots[0]++;
		cond_resched();
	}
	/* Flush any final queued run of xattr items. */
	if (ins_nr > 0) {
		u64 last_extent = 0;

		ret = copy_items(trans, inode, dst_path, path,
				 &last_extent, start_slot,
				 ins_nr, 1, 0);
		/* can't be 1, extent items aren't processed */
		ASSERT(ret <= 0);
		if (ret < 0)
			return ret;
	}

	return 0;
}
4243
a89ca6f2
FM
4244/*
4245 * If the no holes feature is enabled we need to make sure any hole between the
4246 * last extent and the i_size of our inode is explicitly marked in the log. This
4247 * is to make sure that doing something like:
4248 *
4249 * 1) create file with 128Kb of data
4250 * 2) truncate file to 64Kb
4251 * 3) truncate file to 256Kb
4252 * 4) fsync file
4253 * 5) <crash/power failure>
4254 * 6) mount fs and trigger log replay
4255 *
4256 * Will give us a file with a size of 256Kb, the first 64Kb of data match what
4257 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the
4258 * file correspond to a hole. The presence of explicit holes in a log tree is
4259 * what guarantees that log replay will remove/adjust file extent items in the
4260 * fs/subvol tree.
4261 *
4262 * Here we do not need to care about holes between extents, that is already done
4263 * by copy_items(). We also only need to do this in the full sync path, where we
4264 * lookup for extents from the fs/subvol tree only. In the fast path case, we
4265 * lookup the list of modified extent maps and if any represents a hole, we
4266 * insert a corresponding extent representing a hole in the log tree.
4267 */
static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct inode *inode,
				   struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	u64 hole_start;
	u64 hole_size;
	struct extent_buffer *leaf;
	struct btrfs_root *log = root->log_root;
	const u64 ino = btrfs_ino(inode);
	const u64 i_size = i_size_read(inode);

	/* Without NO_HOLES, holes already have explicit extent items. */
	if (!btrfs_fs_incompat(root->fs_info, NO_HOLES))
		return 0;

	/* Search past the last possible extent item to land after it. */
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	ASSERT(ret != 0);	/* key.offset == (u64)-1 can never match exactly */
	if (ret < 0)
		return ret;

	/* Step back to the inode's last item (extent or otherwise). */
	ASSERT(path->slots[0] > 0);
	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
		/* inode does not have any extents */
		hole_start = 0;
		hole_size = i_size;
	} else {
		struct btrfs_file_extent_item *extent;
		u64 len;

		/*
		 * If there's an extent beyond i_size, an explicit hole was
		 * already inserted by copy_items().
		 */
		if (key.offset >= i_size)
			return 0;

		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, extent) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			/* Inline extents hold all the data up to i_size. */
			len = btrfs_file_extent_inline_len(leaf,
							   path->slots[0],
							   extent);
			ASSERT(len == i_size);
			return 0;
		}

		len = btrfs_file_extent_num_bytes(leaf, extent);
		/* Last extent goes beyond i_size, no need to log a hole. */
		if (key.offset + len > i_size)
			return 0;
		hole_start = key.offset + len;
		hole_size = i_size - hole_start;
	}
	btrfs_release_path(path);

	/* Last extent ends at i_size. */
	if (hole_size == 0)
		return 0;

	/* Insert an explicit hole extent covering [hole_start, i_size). */
	hole_size = ALIGN(hole_size, root->sectorsize);
	ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
				       hole_size, 0, hole_size, 0, 0, 0);
	return ret;
}
4344
e02119d5
CM
4345/* log a single inode in the tree log.
4346 * At least one parent directory for this inode must exist in the tree
4347 * or be logged already.
4348 *
4349 * Any items from this inode changed by the current transaction are copied
4350 * to the log tree. An extra reference is taken on any extents in this
4351 * file, allowing us to avoid a whole pile of corner cases around logging
4352 * blocks that have been removed from the tree.
4353 *
4354 * See LOG_INODE_ALL and related defines for a description of what inode_only
4355 * does.
4356 *
4357 * This handles both files and directories.
4358 */
12fcfd22 4359static int btrfs_log_inode(struct btrfs_trans_handle *trans,
49dae1bc
FM
4360 struct btrfs_root *root, struct inode *inode,
4361 int inode_only,
4362 const loff_t start,
8407f553
FM
4363 const loff_t end,
4364 struct btrfs_log_ctx *ctx)
e02119d5
CM
4365{
4366 struct btrfs_path *path;
4367 struct btrfs_path *dst_path;
4368 struct btrfs_key min_key;
4369 struct btrfs_key max_key;
4370 struct btrfs_root *log = root->log_root;
31ff1cd2 4371 struct extent_buffer *src = NULL;
827463c4 4372 LIST_HEAD(logged_list);
16e7549f 4373 u64 last_extent = 0;
4a500fd1 4374 int err = 0;
e02119d5 4375 int ret;
3a5f1d45 4376 int nritems;
31ff1cd2
CM
4377 int ins_start_slot = 0;
4378 int ins_nr;
5dc562c5 4379 bool fast_search = false;
33345d01 4380 u64 ino = btrfs_ino(inode);
49dae1bc 4381 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1a4bcf47 4382 u64 logged_isize = 0;
e4545de5 4383 bool need_log_inode_item = true;
e02119d5 4384
e02119d5 4385 path = btrfs_alloc_path();
5df67083
TI
4386 if (!path)
4387 return -ENOMEM;
e02119d5 4388 dst_path = btrfs_alloc_path();
5df67083
TI
4389 if (!dst_path) {
4390 btrfs_free_path(path);
4391 return -ENOMEM;
4392 }
e02119d5 4393
33345d01 4394 min_key.objectid = ino;
e02119d5
CM
4395 min_key.type = BTRFS_INODE_ITEM_KEY;
4396 min_key.offset = 0;
4397
33345d01 4398 max_key.objectid = ino;
12fcfd22 4399
12fcfd22 4400
5dc562c5 4401 /* today the code can only do partial logging of directories */
5269b67e
MX
4402 if (S_ISDIR(inode->i_mode) ||
4403 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4404 &BTRFS_I(inode)->runtime_flags) &&
4405 inode_only == LOG_INODE_EXISTS))
e02119d5
CM
4406 max_key.type = BTRFS_XATTR_ITEM_KEY;
4407 else
4408 max_key.type = (u8)-1;
4409 max_key.offset = (u64)-1;
4410
2c2c452b
FM
4411 /*
4412 * Only run delayed items if we are a dir or a new file.
4413 * Otherwise commit the delayed inode only, which is needed in
4414 * order for the log replay code to mark inodes for link count
4415 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
4416 */
94edf4ae 4417 if (S_ISDIR(inode->i_mode) ||
2c2c452b 4418 BTRFS_I(inode)->generation > root->fs_info->last_trans_committed)
94edf4ae 4419 ret = btrfs_commit_inode_delayed_items(trans, inode);
2c2c452b
FM
4420 else
4421 ret = btrfs_commit_inode_delayed_inode(inode);
4422
4423 if (ret) {
4424 btrfs_free_path(path);
4425 btrfs_free_path(dst_path);
4426 return ret;
16cdcec7
MX
4427 }
4428
e02119d5
CM
4429 mutex_lock(&BTRFS_I(inode)->log_mutex);
4430
0870295b 4431 btrfs_get_logged_extents(inode, &logged_list, start, end);
2ab28f32 4432
e02119d5
CM
4433 /*
4434 * a brute force approach to making sure we get the most uptodate
4435 * copies of everything.
4436 */
4437 if (S_ISDIR(inode->i_mode)) {
4438 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
4439
4f764e51
FM
4440 if (inode_only == LOG_INODE_EXISTS)
4441 max_key_type = BTRFS_XATTR_ITEM_KEY;
33345d01 4442 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
e02119d5 4443 } else {
1a4bcf47
FM
4444 if (inode_only == LOG_INODE_EXISTS) {
4445 /*
4446 * Make sure the new inode item we write to the log has
4447 * the same isize as the current one (if it exists).
4448 * This is necessary to prevent data loss after log
4449 * replay, and also to prevent doing a wrong expanding
4450 * truncate - for e.g. create file, write 4K into offset
4451 * 0, fsync, write 4K into offset 4096, add hard link,
4452 * fsync some other file (to sync log), power fail - if
4453 * we use the inode's current i_size, after log replay
4454 * we get a 8Kb file, with the last 4Kb extent as a hole
4455 * (zeroes), as if an expanding truncate happened,
4456 * instead of getting a file of 4Kb only.
4457 */
4458 err = logged_inode_size(log, inode, path,
4459 &logged_isize);
4460 if (err)
4461 goto out_unlock;
4462 }
a742994a
FM
4463 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4464 &BTRFS_I(inode)->runtime_flags)) {
4465 if (inode_only == LOG_INODE_EXISTS) {
4f764e51 4466 max_key.type = BTRFS_XATTR_ITEM_KEY;
a742994a
FM
4467 ret = drop_objectid_items(trans, log, path, ino,
4468 max_key.type);
4469 } else {
4470 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4471 &BTRFS_I(inode)->runtime_flags);
4472 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4473 &BTRFS_I(inode)->runtime_flags);
28ed1345
CM
4474 while(1) {
4475 ret = btrfs_truncate_inode_items(trans,
4476 log, inode, 0, 0);
4477 if (ret != -EAGAIN)
4478 break;
4479 }
a742994a 4480 }
4f764e51
FM
4481 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4482 &BTRFS_I(inode)->runtime_flags) ||
6cfab851 4483 inode_only == LOG_INODE_EXISTS) {
4f764e51 4484 if (inode_only == LOG_INODE_ALL)
183f37fa 4485 fast_search = true;
4f764e51 4486 max_key.type = BTRFS_XATTR_ITEM_KEY;
5dc562c5 4487 ret = drop_objectid_items(trans, log, path, ino,
e9976151 4488 max_key.type);
a95249b3
JB
4489 } else {
4490 if (inode_only == LOG_INODE_ALL)
4491 fast_search = true;
a95249b3 4492 goto log_extents;
5dc562c5 4493 }
a95249b3 4494
e02119d5 4495 }
4a500fd1
YZ
4496 if (ret) {
4497 err = ret;
4498 goto out_unlock;
4499 }
e02119d5 4500
d397712b 4501 while (1) {
31ff1cd2 4502 ins_nr = 0;
6174d3cb 4503 ret = btrfs_search_forward(root, &min_key,
de78b51a 4504 path, trans->transid);
e02119d5
CM
4505 if (ret != 0)
4506 break;
3a5f1d45 4507again:
31ff1cd2 4508 /* note, ins_nr might be > 0 here, cleanup outside the loop */
33345d01 4509 if (min_key.objectid != ino)
e02119d5
CM
4510 break;
4511 if (min_key.type > max_key.type)
4512 break;
31ff1cd2 4513
e4545de5
FM
4514 if (min_key.type == BTRFS_INODE_ITEM_KEY)
4515 need_log_inode_item = false;
4516
36283bf7
FM
4517 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
4518 if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
4519 if (ins_nr == 0)
4520 goto next_slot;
4521 ret = copy_items(trans, inode, dst_path, path,
4522 &last_extent, ins_start_slot,
4523 ins_nr, inode_only, logged_isize);
4524 if (ret < 0) {
4525 err = ret;
4526 goto out_unlock;
4527 }
4528 ins_nr = 0;
4529 if (ret) {
4530 btrfs_release_path(path);
4531 continue;
4532 }
4533 goto next_slot;
4534 }
4535
e02119d5 4536 src = path->nodes[0];
31ff1cd2
CM
4537 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
4538 ins_nr++;
4539 goto next_slot;
4540 } else if (!ins_nr) {
4541 ins_start_slot = path->slots[0];
4542 ins_nr = 1;
4543 goto next_slot;
e02119d5
CM
4544 }
4545
16e7549f 4546 ret = copy_items(trans, inode, dst_path, path, &last_extent,
1a4bcf47
FM
4547 ins_start_slot, ins_nr, inode_only,
4548 logged_isize);
16e7549f 4549 if (ret < 0) {
4a500fd1
YZ
4550 err = ret;
4551 goto out_unlock;
a71db86e
RV
4552 }
4553 if (ret) {
16e7549f
JB
4554 ins_nr = 0;
4555 btrfs_release_path(path);
4556 continue;
4a500fd1 4557 }
31ff1cd2
CM
4558 ins_nr = 1;
4559 ins_start_slot = path->slots[0];
4560next_slot:
e02119d5 4561
3a5f1d45
CM
4562 nritems = btrfs_header_nritems(path->nodes[0]);
4563 path->slots[0]++;
4564 if (path->slots[0] < nritems) {
4565 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
4566 path->slots[0]);
4567 goto again;
4568 }
31ff1cd2 4569 if (ins_nr) {
16e7549f
JB
4570 ret = copy_items(trans, inode, dst_path, path,
4571 &last_extent, ins_start_slot,
1a4bcf47 4572 ins_nr, inode_only, logged_isize);
16e7549f 4573 if (ret < 0) {
4a500fd1
YZ
4574 err = ret;
4575 goto out_unlock;
4576 }
16e7549f 4577 ret = 0;
31ff1cd2
CM
4578 ins_nr = 0;
4579 }
b3b4aa74 4580 btrfs_release_path(path);
3a5f1d45 4581
3d41d702 4582 if (min_key.offset < (u64)-1) {
e02119d5 4583 min_key.offset++;
3d41d702 4584 } else if (min_key.type < max_key.type) {
e02119d5 4585 min_key.type++;
3d41d702
FDBM
4586 min_key.offset = 0;
4587 } else {
e02119d5 4588 break;
3d41d702 4589 }
e02119d5 4590 }
31ff1cd2 4591 if (ins_nr) {
16e7549f 4592 ret = copy_items(trans, inode, dst_path, path, &last_extent,
1a4bcf47
FM
4593 ins_start_slot, ins_nr, inode_only,
4594 logged_isize);
16e7549f 4595 if (ret < 0) {
4a500fd1
YZ
4596 err = ret;
4597 goto out_unlock;
4598 }
16e7549f 4599 ret = 0;
31ff1cd2
CM
4600 ins_nr = 0;
4601 }
5dc562c5 4602
36283bf7
FM
4603 btrfs_release_path(path);
4604 btrfs_release_path(dst_path);
4605 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
4606 if (err)
4607 goto out_unlock;
a89ca6f2
FM
4608 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
4609 btrfs_release_path(path);
4610 btrfs_release_path(dst_path);
4611 err = btrfs_log_trailing_hole(trans, root, inode, path);
4612 if (err)
4613 goto out_unlock;
4614 }
a95249b3 4615log_extents:
f3b15ccd
JB
4616 btrfs_release_path(path);
4617 btrfs_release_path(dst_path);
e4545de5
FM
4618 if (need_log_inode_item) {
4619 err = log_inode_item(trans, log, dst_path, inode);
4620 if (err)
4621 goto out_unlock;
4622 }
5dc562c5 4623 if (fast_search) {
b38ef71c
FM
4624 /*
4625 * Some ordered extents started by fsync might have completed
4626 * before we collected the ordered extents in logged_list, which
4627 * means they're gone, not in our logged_list nor in the inode's
4628 * ordered tree. We want the application/user space to know an
4629 * error happened while attempting to persist file data so that
4630 * it can take proper action. If such error happened, we leave
4631 * without writing to the log tree and the fsync must report the
4632 * file data write error and not commit the current transaction.
4633 */
4634 err = btrfs_inode_check_errors(inode);
4635 if (err) {
4636 ctx->io_err = err;
4637 goto out_unlock;
4638 }
827463c4 4639 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
8407f553 4640 &logged_list, ctx);
5dc562c5
JB
4641 if (ret) {
4642 err = ret;
4643 goto out_unlock;
4644 }
d006a048 4645 } else if (inode_only == LOG_INODE_ALL) {
06d3d22b
LB
4646 struct extent_map *em, *n;
4647
49dae1bc
FM
4648 write_lock(&em_tree->lock);
4649 /*
4650 * We can't just remove every em if we're called for a ranged
4651 * fsync - that is, one that doesn't cover the whole possible
4652 * file range (0 to LLONG_MAX). This is because we can have
4653 * em's that fall outside the range we're logging and therefore
4654 * their ordered operations haven't completed yet
4655 * (btrfs_finish_ordered_io() not invoked yet). This means we
4656 * didn't get their respective file extent item in the fs/subvol
4657 * tree yet, and need to let the next fast fsync (one which
4658 * consults the list of modified extent maps) find the em so
4659 * that it logs a matching file extent item and waits for the
4660 * respective ordered operation to complete (if it's still
4661 * running).
4662 *
4663 * Removing every em outside the range we're logging would make
4664 * the next fast fsync not log their matching file extent items,
4665 * therefore making us lose data after a log replay.
4666 */
4667 list_for_each_entry_safe(em, n, &em_tree->modified_extents,
4668 list) {
4669 const u64 mod_end = em->mod_start + em->mod_len - 1;
4670
4671 if (em->mod_start >= start && mod_end <= end)
4672 list_del_init(&em->list);
4673 }
4674 write_unlock(&em_tree->lock);
5dc562c5
JB
4675 }
4676
9623f9a3 4677 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
2f2ff0ee
FM
4678 ret = log_directory_changes(trans, root, inode, path, dst_path,
4679 ctx);
4a500fd1
YZ
4680 if (ret) {
4681 err = ret;
4682 goto out_unlock;
4683 }
e02119d5 4684 }
49dae1bc 4685
2f2ff0ee 4686 spin_lock(&BTRFS_I(inode)->lock);
125c4cf9
FM
4687 BTRFS_I(inode)->logged_trans = trans->transid;
4688 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
2f2ff0ee 4689 spin_unlock(&BTRFS_I(inode)->lock);
4a500fd1 4690out_unlock:
827463c4
MX
4691 if (unlikely(err))
4692 btrfs_put_logged_extents(&logged_list);
4693 else
4694 btrfs_submit_logged_extents(&logged_list, log);
e02119d5
CM
4695 mutex_unlock(&BTRFS_I(inode)->log_mutex);
4696
4697 btrfs_free_path(path);
4698 btrfs_free_path(dst_path);
4a500fd1 4699 return err;
e02119d5
CM
4700}
4701
12fcfd22
CM
4702/*
4703 * follow the dentry parent pointers up the chain and see if any
4704 * of the directories in it require a full commit before they can
4705 * be logged. Returns zero if nothing special needs to be done or 1 if
4706 * a full commit is required.
4707 */
static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
					       struct inode *inode,
					       struct dentry *parent,
					       struct super_block *sb,
					       u64 last_committed)
{
	int ret = 0;
	struct btrfs_root *root;
	struct dentry *old_parent = NULL;
	struct inode *orig_inode = inode;

	/*
	 * for regular files, if its inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->i_mode) &&
	    BTRFS_I(inode)->generation <= last_committed &&
	    BTRFS_I(inode)->last_unlink_trans <= last_committed)
		goto out;

	/* For non-directories, start the walk from the parent directory. */
	if (!S_ISDIR(inode->i_mode)) {
		if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb)
			goto out;
		inode = d_inode(parent);
	}

	while (1) {
		/*
		 * If we are logging a directory then we start with our inode,
		 * not our parents inode, so we need to skip setting the
		 * logged_trans so that further down in the log code we don't
		 * think this inode has already been logged.
		 */
		if (inode != orig_inode)
			BTRFS_I(inode)->logged_trans = trans->transid;
		smp_mb();

		if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
			root = BTRFS_I(inode)->root;

			/*
			 * make sure any commits to the log are forced
			 * to be full commits
			 */
			btrfs_set_log_full_commit(root->fs_info, trans);
			ret = 1;
			break;
		}

		/* Stop when we run off this sb or past a valid parent. */
		if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb)
			break;

		if (IS_ROOT(parent))
			break;

		/* Climb one level, dropping the ref on the previous parent. */
		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
		inode = d_inode(parent);

	}
	dput(old_parent);
out:
	return ret;
}
4775
2f2ff0ee
FM
/*
 * Queue element used by log_new_dir_dentries() to track directory inodes
 * whose new dentries still need to be logged.
 */
struct btrfs_dir_list {
	u64 ino;		/* objectid of the directory inode */
	struct list_head list;	/* link into the local dir_list queue */
};
4780
4781/*
4782 * Log the inodes of the new dentries of a directory. See log_dir_items() for
4783 * details about the why it is needed.
4784 * This is a recursive operation - if an existing dentry corresponds to a
4785 * directory, that directory's new entries are logged too (same behaviour as
4786 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
4787 * the dentries point to we do not lock their i_mutex, otherwise lockdep
4788 * complains about the following circular lock dependency / possible deadlock:
4789 *
4790 * CPU0 CPU1
4791 * ---- ----
4792 * lock(&type->i_mutex_dir_key#3/2);
4793 * lock(sb_internal#2);
4794 * lock(&type->i_mutex_dir_key#3/2);
4795 * lock(&sb->s_type->i_mutex_key#14);
4796 *
4797 * Where sb_internal is the lock (a counter that works as a lock) acquired by
4798 * sb_start_intwrite() in btrfs_start_transaction().
4799 * Not locking i_mutex of the inodes is still safe because:
4800 *
4801 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
4802 * that while logging the inode new references (names) are added or removed
4803 * from the inode, leaving the logged inode item with a link count that does
4804 * not match the number of logged inode reference items. This is fine because
4805 * at log replay time we compute the real number of links and correct the
4806 * link count in the inode item (see replay_one_buffer() and
4807 * link_to_fixup_dir());
4808 *
4809 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
4810 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
4811 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
4812 * has a size that doesn't match the sum of the lengths of all the logged
4813 * names. This does not result in a problem because if a dir_item key is
4814 * logged but its matching dir_index key is not logged, at log replay time we
4815 * don't use it to replay the respective name (see replay_one_name()). On the
4816 * other hand if only the dir_index key ends up being logged, the respective
4817 * name is added to the fs/subvol tree with both the dir_item and dir_index
4818 * keys created (see replay_one_name()).
4819 * The directory's inode item with a wrong i_size is not a problem as well,
4820 * since we don't use it at log replay time to set the i_size in the inode
4821 * item of the fs/subvol tree (see overwrite_item()).
4822 */
static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *start_inode,
				struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *log = root->log_root;
	struct btrfs_path *path;
	LIST_HEAD(dir_list);	/* FIFO of directories left to process */
	struct btrfs_dir_list *dir_elem;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Seed the queue with the starting directory. */
	dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
	if (!dir_elem) {
		btrfs_free_path(path);
		return -ENOMEM;
	}
	dir_elem->ino = btrfs_ino(start_inode);
	list_add_tail(&dir_elem->list, &dir_list);

	/* Breadth-first walk: newly found subdirs are appended to dir_list. */
	while (!list_empty(&dir_list)) {
		struct extent_buffer *leaf;
		struct btrfs_key min_key;
		int nritems;
		int i;

		dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
					    list);
		/* After an error, just drain and free the remaining queue. */
		if (ret)
			goto next_dir_inode;

		min_key.objectid = dir_elem->ino;
		min_key.type = BTRFS_DIR_ITEM_KEY;
		min_key.offset = 0;
again:
		btrfs_release_path(path);
		/* Only dir items changed in this transaction are of interest. */
		ret = btrfs_search_forward(log, &min_key, path, trans->transid);
		if (ret < 0) {
			goto next_dir_inode;
		} else if (ret > 0) {
			ret = 0;
			goto next_dir_inode;
		}

process_leaf:
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		for (i = path->slots[0]; i < nritems; i++) {
			struct btrfs_dir_item *di;
			struct btrfs_key di_key;
			struct inode *di_inode;
			struct btrfs_dir_list *new_dir_elem;
			int log_mode = LOG_INODE_EXISTS;
			int type;

			btrfs_item_key_to_cpu(leaf, &min_key, i);
			if (min_key.objectid != dir_elem->ino ||
			    min_key.type != BTRFS_DIR_ITEM_KEY)
				goto next_dir_inode;

			di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
			type = btrfs_dir_type(leaf, di);
			/* Old non-directory entries don't need logging here. */
			if (btrfs_dir_transid(leaf, di) < trans->transid &&
			    type != BTRFS_FT_DIR)
				continue;
			btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
			/* Skip subvolume/snapshot entries. */
			if (di_key.type == BTRFS_ROOT_ITEM_KEY)
				continue;

			di_inode = btrfs_iget(root->fs_info->sb, &di_key,
					      root, NULL);
			if (IS_ERR(di_inode)) {
				ret = PTR_ERR(di_inode);
				goto next_dir_inode;
			}

			if (btrfs_inode_in_log(di_inode, trans->transid)) {
				iput(di_inode);
				continue;
			}

			ctx->log_new_dentries = false;
			if (type == BTRFS_FT_DIR)
				log_mode = LOG_INODE_ALL;
			btrfs_release_path(path);
			ret = btrfs_log_inode(trans, root, di_inode,
					      log_mode, 0, LLONG_MAX, ctx);
			iput(di_inode);
			if (ret)
				goto next_dir_inode;
			/* Logged dir had its own new dentries: enqueue it. */
			if (ctx->log_new_dentries) {
				new_dir_elem = kmalloc(sizeof(*new_dir_elem),
						       GFP_NOFS);
				if (!new_dir_elem) {
					ret = -ENOMEM;
					goto next_dir_inode;
				}
				new_dir_elem->ino = di_key.objectid;
				list_add_tail(&new_dir_elem->list, &dir_list);
			}
			/* Path was released above; restart the key walk. */
			break;
		}
		if (i == nritems) {
			ret = btrfs_next_leaf(log, path);
			if (ret < 0) {
				goto next_dir_inode;
			} else if (ret > 0) {
				ret = 0;
				goto next_dir_inode;
			}
			goto process_leaf;
		}
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
			goto again;
		}
next_dir_inode:
		list_del(&dir_elem->list);
		kfree(dir_elem);
	}

	btrfs_free_path(path);
	return ret;
}
4950
18aa0922
FM
/*
 * Log every parent directory of @inode by walking its INODE_REF and
 * INODE_EXTREF items in the commit root of the fs/subvol tree and calling
 * btrfs_log_inode(LOG_INODE_ALL) on each referenced parent.  Parents whose
 * inode can no longer be looked up (deleted) are skipped.
 *
 * Returns 0 on success or a negative errno.
 */
static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
				 struct inode *inode,
				 struct btrfs_log_ctx *ctx)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	const u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	/* Read-only walk over the commit root; no tree locks needed. */
	path->skip_locking = 1;
	path->search_commit_root = 1;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		u32 cur_offset = 0;	/* byte offset within the current item */
		u32 item_size;
		unsigned long ptr;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		/* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
		if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
			break;

		item_size = btrfs_item_size_nr(leaf, slot);
		ptr = btrfs_item_ptr_offset(leaf, slot);
		while (cur_offset < item_size) {
			struct btrfs_key inode_key;
			struct inode *dir_inode;

			inode_key.type = BTRFS_INODE_ITEM_KEY;
			inode_key.offset = 0;

			if (key.type == BTRFS_INODE_EXTREF_KEY) {
				/* An extref item can hold several references. */
				struct btrfs_inode_extref *extref;

				extref = (struct btrfs_inode_extref *)
					(ptr + cur_offset);
				inode_key.objectid = btrfs_inode_extref_parent(
					leaf, extref);
				cur_offset += sizeof(*extref);
				cur_offset += btrfs_inode_extref_name_len(leaf,
					extref);
			} else {
				/* Plain ref: parent ino is the key offset. */
				inode_key.objectid = key.offset;
				cur_offset = item_size;
			}

			dir_inode = btrfs_iget(root->fs_info->sb, &inode_key,
					       root, NULL);
			/* If parent inode was deleted, skip it. */
			if (IS_ERR(dir_inode))
				continue;

			ret = btrfs_log_inode(trans, root, dir_inode,
					      LOG_INODE_ALL, 0, LLONG_MAX, ctx);
			iput(dir_inode);
			if (ret)
				goto out;
		}
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
5038
e02119d5
CM
5039/*
5040 * helper function around btrfs_log_inode to make sure newly created
5041 * parent directories also end up in the log. A minimal inode and backref
5042 * only logging is done of any parent directories that are older than
5043 * the last committed transaction
5044 */
48a3b636
ES
5045static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
5046 struct btrfs_root *root, struct inode *inode,
49dae1bc
FM
5047 struct dentry *parent,
5048 const loff_t start,
5049 const loff_t end,
5050 int exists_only,
8b050d35 5051 struct btrfs_log_ctx *ctx)
e02119d5 5052{
12fcfd22 5053 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
e02119d5 5054 struct super_block *sb;
6a912213 5055 struct dentry *old_parent = NULL;
12fcfd22
CM
5056 int ret = 0;
5057 u64 last_committed = root->fs_info->last_trans_committed;
2f2ff0ee
FM
5058 bool log_dentries = false;
5059 struct inode *orig_inode = inode;
12fcfd22
CM
5060
5061 sb = inode->i_sb;
5062
3a5e1404
SW
5063 if (btrfs_test_opt(root, NOTREELOG)) {
5064 ret = 1;
5065 goto end_no_trans;
5066 }
5067
995946dd
MX
5068 /*
5069 * The prev transaction commit doesn't complete, we need do
5070 * full commit by ourselves.
5071 */
12fcfd22
CM
5072 if (root->fs_info->last_trans_log_full_commit >
5073 root->fs_info->last_trans_committed) {
5074 ret = 1;
5075 goto end_no_trans;
5076 }
5077
76dda93c
YZ
5078 if (root != BTRFS_I(inode)->root ||
5079 btrfs_root_refs(&root->root_item) == 0) {
5080 ret = 1;
5081 goto end_no_trans;
5082 }
5083
12fcfd22
CM
5084 ret = check_parent_dirs_for_sync(trans, inode, parent,
5085 sb, last_committed);
5086 if (ret)
5087 goto end_no_trans;
e02119d5 5088
22ee6985 5089 if (btrfs_inode_in_log(inode, trans->transid)) {
257c62e1
CM
5090 ret = BTRFS_NO_LOG_SYNC;
5091 goto end_no_trans;
5092 }
5093
8b050d35 5094 ret = start_log_trans(trans, root, ctx);
4a500fd1 5095 if (ret)
e87ac136 5096 goto end_no_trans;
e02119d5 5097
8407f553 5098 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
4a500fd1
YZ
5099 if (ret)
5100 goto end_trans;
12fcfd22 5101
af4176b4
CM
5102 /*
5103 * for regular files, if its inode is already on disk, we don't
5104 * have to worry about the parents at all. This is because
5105 * we can use the last_unlink_trans field to record renames
5106 * and other fun in this file.
5107 */
5108 if (S_ISREG(inode->i_mode) &&
5109 BTRFS_I(inode)->generation <= last_committed &&
4a500fd1
YZ
5110 BTRFS_I(inode)->last_unlink_trans <= last_committed) {
5111 ret = 0;
5112 goto end_trans;
5113 }
af4176b4 5114
2f2ff0ee
FM
5115 if (S_ISDIR(inode->i_mode) && ctx && ctx->log_new_dentries)
5116 log_dentries = true;
5117
18aa0922
FM
5118 /*
5119 * On unlink we must make sure all our current and old parent directores
5120 * inodes are fully logged. This is to prevent leaving dangling
5121 * directory index entries in directories that were our parents but are
5122 * not anymore. Not doing this results in old parent directory being
5123 * impossible to delete after log replay (rmdir will always fail with
5124 * error -ENOTEMPTY).
5125 *
5126 * Example 1:
5127 *
5128 * mkdir testdir
5129 * touch testdir/foo
5130 * ln testdir/foo testdir/bar
5131 * sync
5132 * unlink testdir/bar
5133 * xfs_io -c fsync testdir/foo
5134 * <power failure>
5135 * mount fs, triggers log replay
5136 *
5137 * If we don't log the parent directory (testdir), after log replay the
5138 * directory still has an entry pointing to the file inode using the bar
5139 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
5140 * the file inode has a link count of 1.
5141 *
5142 * Example 2:
5143 *
5144 * mkdir testdir
5145 * touch foo
5146 * ln foo testdir/foo2
5147 * ln foo testdir/foo3
5148 * sync
5149 * unlink testdir/foo3
5150 * xfs_io -c fsync foo
5151 * <power failure>
5152 * mount fs, triggers log replay
5153 *
5154 * Similar as the first example, after log replay the parent directory
5155 * testdir still has an entry pointing to the inode file with name foo3
5156 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
5157 * and has a link count of 2.
5158 */
5159 if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
5160 ret = btrfs_log_all_parents(trans, orig_inode, ctx);
5161 if (ret)
5162 goto end_trans;
5163 }
5164
12fcfd22 5165 while (1) {
2b0143b5 5166 if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb)
e02119d5
CM
5167 break;
5168
2b0143b5 5169 inode = d_inode(parent);
76dda93c
YZ
5170 if (root != BTRFS_I(inode)->root)
5171 break;
5172
18aa0922
FM
5173 if (BTRFS_I(inode)->generation > last_committed) {
5174 ret = btrfs_log_inode(trans, root, inode,
5175 LOG_INODE_EXISTS,
8407f553 5176 0, LLONG_MAX, ctx);
4a500fd1
YZ
5177 if (ret)
5178 goto end_trans;
12fcfd22 5179 }
76dda93c 5180 if (IS_ROOT(parent))
e02119d5 5181 break;
12fcfd22 5182
6a912213
JB
5183 parent = dget_parent(parent);
5184 dput(old_parent);
5185 old_parent = parent;
e02119d5 5186 }
2f2ff0ee
FM
5187 if (log_dentries)
5188 ret = log_new_dir_dentries(trans, root, orig_inode, ctx);
5189 else
5190 ret = 0;
4a500fd1 5191end_trans:
6a912213 5192 dput(old_parent);
4a500fd1 5193 if (ret < 0) {
995946dd 5194 btrfs_set_log_full_commit(root->fs_info, trans);
4a500fd1
YZ
5195 ret = 1;
5196 }
8b050d35
MX
5197
5198 if (ret)
5199 btrfs_remove_log_ctx(root, ctx);
12fcfd22
CM
5200 btrfs_end_log_trans(root);
5201end_no_trans:
5202 return ret;
e02119d5
CM
5203}
5204
5205/*
5206 * it is not safe to log dentry if the chunk root has added new
5207 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
5208 * If this returns 1, you must commit the transaction to safely get your
5209 * data on disk.
5210 */
5211int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
8b050d35 5212 struct btrfs_root *root, struct dentry *dentry,
49dae1bc
FM
5213 const loff_t start,
5214 const loff_t end,
8b050d35 5215 struct btrfs_log_ctx *ctx)
e02119d5 5216{
6a912213
JB
5217 struct dentry *parent = dget_parent(dentry);
5218 int ret;
5219
2b0143b5 5220 ret = btrfs_log_inode_parent(trans, root, d_inode(dentry), parent,
49dae1bc 5221 start, end, 0, ctx);
6a912213
JB
5222 dput(parent);
5223
5224 return ret;
e02119d5
CM
5225}
5226
5227/*
5228 * should be called during mount to recover any replay any log trees
5229 * from the FS
5230 */
5231int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
5232{
5233 int ret;
5234 struct btrfs_path *path;
5235 struct btrfs_trans_handle *trans;
5236 struct btrfs_key key;
5237 struct btrfs_key found_key;
5238 struct btrfs_key tmp_key;
5239 struct btrfs_root *log;
5240 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
5241 struct walk_control wc = {
5242 .process_func = process_one_buffer,
5243 .stage = 0,
5244 };
5245
e02119d5 5246 path = btrfs_alloc_path();
db5b493a
TI
5247 if (!path)
5248 return -ENOMEM;
5249
5250 fs_info->log_root_recovering = 1;
e02119d5 5251
4a500fd1 5252 trans = btrfs_start_transaction(fs_info->tree_root, 0);
79787eaa
JM
5253 if (IS_ERR(trans)) {
5254 ret = PTR_ERR(trans);
5255 goto error;
5256 }
e02119d5
CM
5257
5258 wc.trans = trans;
5259 wc.pin = 1;
5260
db5b493a 5261 ret = walk_log_tree(trans, log_root_tree, &wc);
79787eaa
JM
5262 if (ret) {
5263 btrfs_error(fs_info, ret, "Failed to pin buffers while "
5264 "recovering log root tree.");
5265 goto error;
5266 }
e02119d5
CM
5267
5268again:
5269 key.objectid = BTRFS_TREE_LOG_OBJECTID;
5270 key.offset = (u64)-1;
962a298f 5271 key.type = BTRFS_ROOT_ITEM_KEY;
e02119d5 5272
d397712b 5273 while (1) {
e02119d5 5274 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
79787eaa
JM
5275
5276 if (ret < 0) {
5277 btrfs_error(fs_info, ret,
5278 "Couldn't find tree log root.");
5279 goto error;
5280 }
e02119d5
CM
5281 if (ret > 0) {
5282 if (path->slots[0] == 0)
5283 break;
5284 path->slots[0]--;
5285 }
5286 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
5287 path->slots[0]);
b3b4aa74 5288 btrfs_release_path(path);
e02119d5
CM
5289 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
5290 break;
5291
cb517eab 5292 log = btrfs_read_fs_root(log_root_tree, &found_key);
79787eaa
JM
5293 if (IS_ERR(log)) {
5294 ret = PTR_ERR(log);
5295 btrfs_error(fs_info, ret,
5296 "Couldn't read tree log root.");
5297 goto error;
5298 }
e02119d5
CM
5299
5300 tmp_key.objectid = found_key.offset;
5301 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
5302 tmp_key.offset = (u64)-1;
5303
5304 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
79787eaa
JM
5305 if (IS_ERR(wc.replay_dest)) {
5306 ret = PTR_ERR(wc.replay_dest);
b50c6e25
JB
5307 free_extent_buffer(log->node);
5308 free_extent_buffer(log->commit_root);
5309 kfree(log);
79787eaa
JM
5310 btrfs_error(fs_info, ret, "Couldn't read target root "
5311 "for tree log recovery.");
5312 goto error;
5313 }
e02119d5 5314
07d400a6 5315 wc.replay_dest->log_root = log;
5d4f98a2 5316 btrfs_record_root_in_trans(trans, wc.replay_dest);
e02119d5 5317 ret = walk_log_tree(trans, log, &wc);
e02119d5 5318
b50c6e25 5319 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
e02119d5
CM
5320 ret = fixup_inode_link_counts(trans, wc.replay_dest,
5321 path);
e02119d5
CM
5322 }
5323
5324 key.offset = found_key.offset - 1;
07d400a6 5325 wc.replay_dest->log_root = NULL;
e02119d5 5326 free_extent_buffer(log->node);
b263c2c8 5327 free_extent_buffer(log->commit_root);
e02119d5
CM
5328 kfree(log);
5329
b50c6e25
JB
5330 if (ret)
5331 goto error;
5332
e02119d5
CM
5333 if (found_key.offset == 0)
5334 break;
5335 }
b3b4aa74 5336 btrfs_release_path(path);
e02119d5
CM
5337
5338 /* step one is to pin it all, step two is to replay just inodes */
5339 if (wc.pin) {
5340 wc.pin = 0;
5341 wc.process_func = replay_one_buffer;
5342 wc.stage = LOG_WALK_REPLAY_INODES;
5343 goto again;
5344 }
5345 /* step three is to replay everything */
5346 if (wc.stage < LOG_WALK_REPLAY_ALL) {
5347 wc.stage++;
5348 goto again;
5349 }
5350
5351 btrfs_free_path(path);
5352
abefa55a
JB
5353 /* step 4: commit the transaction, which also unpins the blocks */
5354 ret = btrfs_commit_transaction(trans, fs_info->tree_root);
5355 if (ret)
5356 return ret;
5357
e02119d5
CM
5358 free_extent_buffer(log_root_tree->node);
5359 log_root_tree->log_root = NULL;
5360 fs_info->log_root_recovering = 0;
e02119d5 5361 kfree(log_root_tree);
79787eaa 5362
abefa55a 5363 return 0;
79787eaa 5364error:
b50c6e25
JB
5365 if (wc.trans)
5366 btrfs_end_transaction(wc.trans, fs_info->tree_root);
79787eaa
JM
5367 btrfs_free_path(path);
5368 return ret;
e02119d5 5369}
12fcfd22
CM
5370
5371/*
5372 * there are some corner cases where we want to force a full
5373 * commit instead of allowing a directory to be logged.
5374 *
5375 * They revolve around files there were unlinked from the directory, and
5376 * this function updates the parent directory so that a full commit is
5377 * properly done if it is fsync'd later after the unlinks are done.
5378 */
5379void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
5380 struct inode *dir, struct inode *inode,
5381 int for_rename)
5382{
af4176b4
CM
5383 /*
5384 * when we're logging a file, if it hasn't been renamed
5385 * or unlinked, and its inode is fully committed on disk,
5386 * we don't have to worry about walking up the directory chain
5387 * to log its parents.
5388 *
5389 * So, we use the last_unlink_trans field to put this transid
5390 * into the file. When the file is logged we check it and
5391 * don't log the parents if the file is fully on disk.
5392 */
5393 if (S_ISREG(inode->i_mode))
5394 BTRFS_I(inode)->last_unlink_trans = trans->transid;
5395
12fcfd22
CM
5396 /*
5397 * if this directory was already logged any new
5398 * names for this file/dir will get recorded
5399 */
5400 smp_mb();
5401 if (BTRFS_I(dir)->logged_trans == trans->transid)
5402 return;
5403
5404 /*
5405 * if the inode we're about to unlink was logged,
5406 * the log will be properly updated for any new names
5407 */
5408 if (BTRFS_I(inode)->logged_trans == trans->transid)
5409 return;
5410
5411 /*
5412 * when renaming files across directories, if the directory
5413 * there we're unlinking from gets fsync'd later on, there's
5414 * no way to find the destination directory later and fsync it
5415 * properly. So, we have to be conservative and force commits
5416 * so the new name gets discovered.
5417 */
5418 if (for_rename)
5419 goto record;
5420
5421 /* we can safely do the unlink without any special recording */
5422 return;
5423
5424record:
5425 BTRFS_I(dir)->last_unlink_trans = trans->transid;
5426}
5427
5428/*
5429 * Call this after adding a new name for a file and it will properly
5430 * update the log to reflect the new name.
5431 *
5432 * It will return zero if all goes well, and it will return 1 if a
5433 * full transaction commit is required.
5434 */
5435int btrfs_log_new_name(struct btrfs_trans_handle *trans,
5436 struct inode *inode, struct inode *old_dir,
5437 struct dentry *parent)
5438{
5439 struct btrfs_root * root = BTRFS_I(inode)->root;
5440
af4176b4
CM
5441 /*
5442 * this will force the logging code to walk the dentry chain
5443 * up for the file
5444 */
5445 if (S_ISREG(inode->i_mode))
5446 BTRFS_I(inode)->last_unlink_trans = trans->transid;
5447
12fcfd22
CM
5448 /*
5449 * if this inode hasn't been logged and directory we're renaming it
5450 * from hasn't been logged, we don't need to log it
5451 */
5452 if (BTRFS_I(inode)->logged_trans <=
5453 root->fs_info->last_trans_committed &&
5454 (!old_dir || BTRFS_I(old_dir)->logged_trans <=
5455 root->fs_info->last_trans_committed))
5456 return 0;
5457
49dae1bc
FM
5458 return btrfs_log_inode_parent(trans, root, inode, parent, 0,
5459 LLONG_MAX, 1, NULL);
12fcfd22
CM
5460}
5461
This page took 0.568715 seconds and 5 git commands to generate.