/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/vmalloc.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
			      struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}

static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
			     u64 extent_item_pos,
			     struct extent_inode_elem **eie)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see whether we
	 * find one (or more) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
		if (ret < 0)
			return ret;
	}

	return 0;
}

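/*
 * Worked example for illustration: given a file extent item with
 * btrfs_file_extent_offset() == 8192 and num_bytes == 16384, a query
 * for extent_item_pos == 12288 lies within [8192, 24576), so
 * check_extent_in_eb() records inum and key->offset + 4096, i.e. the
 * file offset that corresponds to the queried position in the extent.
 */
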
/*
 * this structure records all encountered refs on the way up to the root
 */
struct __prelim_ref {
	struct list_head list;
	u64 root_id;
	struct btrfs_key key_for_search;
	int level;
	int count;
	struct extent_inode_elem *inode_list;
	u64 parent;
	u64 wanted_disk_byte;
};

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct __prelim_ref),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_prelim_ref_exit(void)
{
	if (btrfs_prelim_ref_cache)
		kmem_cache_destroy(btrfs_prelim_ref_cache);
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we have the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we have the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see __add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */

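/*
 * Example for illustration: an indirect data backref from disk (column
 * 4 of the second table) carries a root and an EXTENT_DATA key but no
 * parent. __resolve_indirect_ref() below searches that root for the
 * key; every leaf that holds a matching file extent item becomes a
 * parent of the extent in question.
 */
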
static int __add_prelim_ref(struct list_head *head, u64 root_id,
			    struct btrfs_key *key, int level,
			    u64 parent, u64 wanted_disk_byte, int count,
			    gfp_t gfp_mask)
{
	struct __prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	list_add_tail(&ref->list, head);

	return 0;
}

static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents, struct __prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * We normally enter this function with the path already pointing to
	 * the first item to check. But sometimes, we may enter it with
	 * slot==nritems. In that case, go to the next leaf before we continue.
	 */
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
		ret = btrfs_next_old_leaf(root, path, time_seq);

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			count++;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
							 *extent_item_pos,
							 &eie);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge(parents, eb->start,
					      (uintptr_t)eie,
					      (u64 *)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
		}
next:
		ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	return ret;
}

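/*
 * Note on the ulist_add_merge() call above: each parent's aux value
 * carries a chain of extent_inode_elem. When the same parent leaf is
 * added a second time, ulist_add_merge() returns 0 and hands back the
 * previously stored aux in @old, so the new chain is appended to the
 * tail of the existing one rather than overwriting it.
 */
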
/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path, u64 time_seq,
				  struct __prelim_ref *ref,
				  struct ulist *parents,
				  const u64 *extent_item_pos)
{
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	int index;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);

	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		ret = PTR_ERR(root);
		goto out;
	}

	root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		goto out;
	}

	path->lowest_level = level;
	ret = btrfs_search_old_slot(root, &ref->key_for_search, path, time_seq);

	/* root node has been locked, we can release @subvol_srcu safely here */
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
		 "%d for key (%llu %u %llu)\n",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, ref, level, time_seq,
			      extent_item_pos);
out:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

/*
 * resolve all indirect backrefs from the list
 */
static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				   struct btrfs_path *path, u64 time_seq,
				   struct list_head *head,
				   const u64 *extent_item_pos)
{
	int err;
	int ret = 0;
	struct __prelim_ref *ref;
	struct __prelim_ref *ref_safe;
	struct __prelim_ref *new_ref;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * _safe allows us to insert directly after the current item without
	 * iterating over the newly inserted items.
	 * we're also allowed to re-assign ref during iteration.
	 */
	list_for_each_entry_safe(ref, ref_safe, head, list) {
		if (ref->parent)	/* already direct */
			continue;
		if (ref->count == 0)
			continue;
		err = __resolve_indirect_ref(fs_info, path, time_seq, ref,
					     parents, extent_item_pos);
		/*
		 * we can only tolerate -ENOENT; any other error must be
		 * caught and returned directly.
		 */
		if (err == -ENOENT) {
			continue;
		} else if (err) {
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = node ?
			(struct extent_inode_elem *)(uintptr_t)node->aux : NULL;

		/* additional parents require new refs being added here */
		while ((node = ulist_next(parents, &uiter))) {
			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = (struct extent_inode_elem *)
							(uintptr_t)node->aux;
			list_add(&new_ref->list, &ref->list);
		}
		ulist_reinit(parents);
	}
out:
	ulist_free(parents);
	return ret;
}

static inline int ref_for_same_block(struct __prelim_ref *ref1,
				     struct __prelim_ref *ref2)
{
	if (ref1->level != ref2->level)
		return 0;
	if (ref1->root_id != ref2->root_id)
		return 0;
	if (ref1->key_for_search.type != ref2->key_for_search.type)
		return 0;
	if (ref1->key_for_search.objectid != ref2->key_for_search.objectid)
		return 0;
	if (ref1->key_for_search.offset != ref2->key_for_search.offset)
		return 0;
	if (ref1->parent != ref2->parent)
		return 0;

	return 1;
}

/*
 * read tree blocks and add keys where required.
 */
static int __add_missing_keys(struct btrfs_fs_info *fs_info,
			      struct list_head *head)
{
	struct list_head *pos;
	struct extent_buffer *eb;

	list_for_each(pos, head) {
		struct __prelim_ref *ref;
		ref = list_entry(pos, struct __prelim_ref, list);

		if (ref->parent)
			continue;
		if (ref->key_for_search.type)
			continue;
		BUG_ON(!ref->wanted_disk_byte);
		eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
				     fs_info->tree_root->leafsize, 0);
		if (!eb || !extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			return -EIO;
		}
		btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return 0;
}

/*
 * merge two lists of backrefs and adjust counts accordingly
 *
 * mode = 1: merge identical keys, if key is set
 *    FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
 *           additionally, we could even add a key range for the blocks we
 *           looked into to merge even more (-> replace unresolved refs by those
 *           having a parent).
 * mode = 2: merge identical parents
 */
static void __merge_refs(struct list_head *head, int mode)
{
	struct list_head *pos1;

	list_for_each(pos1, head) {
		struct list_head *n2;
		struct list_head *pos2;
		struct __prelim_ref *ref1;

		ref1 = list_entry(pos1, struct __prelim_ref, list);

		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
		     pos2 = n2, n2 = pos2->next) {
			struct __prelim_ref *ref2;
			struct __prelim_ref *xchg;
			struct extent_inode_elem *eie;

			ref2 = list_entry(pos2, struct __prelim_ref, list);

			if (mode == 1) {
				if (!ref_for_same_block(ref1, ref2))
					continue;
				if (!ref1->parent && ref2->parent) {
					xchg = ref1;
					ref1 = ref2;
					ref2 = xchg;
				}
			} else {
				if (ref1->parent != ref2->parent)
					continue;
			}

			eie = ref1->inode_list;
			while (eie && eie->next)
				eie = eie->next;
			if (eie)
				eie->next = ref2->inode_list;
			else
				ref1->inode_list = ref2->inode_list;
			ref1->count += ref2->count;

			list_del(&ref2->list);
			kmem_cache_free(btrfs_prelim_ref_cache, ref2);
		}
	}
}

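/*
 * Example for illustration: two refs for the same block where only one
 * has been resolved to a parent are merged in mode 1; the resolved
 * parent is kept and the counts are added. Mode 2 runs after
 * __resolve_indirect_refs() and collapses refs that ended up with the
 * same parent.
 */
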
/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the list
 */
static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
			      struct list_head *prefs)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct rb_node *n;
	struct btrfs_key key;
	struct btrfs_key op_key = {0};
	int sgn;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&op_key, &extent_op->key);

	spin_lock(&head->lock);
	n = rb_first(&head->ref_root);
	while (n) {
		struct btrfs_delayed_ref_node *node;
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				rb_node);
		n = rb_next(n);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			sgn = 1;
			break;
		case BTRFS_DROP_DELAYED_REF:
			sgn = -1;
			break;
		default:
			BUG_ON(1);
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, &op_key,
					       ref->level + 1, 0, node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, NULL,
					       ref->level + 1, ref->parent,
					       node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
					       node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
					       ref->parent, node->bytenr,
					       node->ref_mod * sgn, GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			break;
	}
	spin_unlock(&head->lock);
	return ret;
}

/*
 * add all inline backrefs for bytenr to the list
 */
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path, u64 bytenr,
			     int *info_level, struct list_head *prefs)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
					       *info_level + 1, offset,
					       bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
					       bytenr, count, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, offset, NULL,
					       *info_level + 1, 0,
					       bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 */
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 bytenr,
			    int info_level, struct list_head *prefs)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
					       info_level + 1, key.offset,
					       bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
					       bytenr, count, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, key.offset, NULL,
					       info_level + 1, 0,
					       bytenr, 1, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, keyed backrefs and
 * delayed refs) for the given bytenr to the refs list, merges duplicates
 * and resolves indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans)
		path->search_commit_root = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	if (trans) {
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(trans, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = __add_delayed_refs(head, time_seq,
						 &prefs_delayed);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_level, &prefs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	list_splice_init(&prefs_delayed, &prefs);

	ret = __add_missing_keys(fs_info, &prefs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 1);

	ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
				      extent_item_pos);
	if (ret)
		goto out;

	__merge_refs(&prefs, 2);

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		WARN_ON(ref->count < 0);
		if (ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			struct extent_inode_elem *eie = NULL;
			if (extent_item_pos && !ref->inode_list) {
				u32 bsz;
				struct extent_buffer *eb;
				bsz = btrfs_level_size(fs_info->extent_root,
						       info_level);
				eb = read_tree_block(fs_info->extent_root,
						     ref->parent, bsz, 0);
				if (!eb || !extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge(refs, ref->parent,
					      (uintptr_t)ref->inode_list,
					      (u64 *)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
		}
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}

out:
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}

	return ret;
}

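/*
 * Summary of the pipeline above: collect delayed refs (under the ref
 * head's mutex), collect inline and keyed refs from the extent tree,
 * read missing keys from the referenced blocks, merge duplicates
 * (mode 1), resolve indirect refs to parent bytenrs, merge refs with
 * identical parents (mode 2), then move the results into the
 * refs/roots ulists.
 */
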
static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct extent_inode_elem *eie_next;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = (struct extent_inode_elem *)(uintptr_t)node->aux;
		for (; eie; eie = eie_next) {
			eie_next = eie->next;
			kfree(eie);
		}
		node->aux = 0;
	}

	ulist_free(blocks);
}

/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. The leafs are stored in the leafs ulist, which must be freed with
 * free_leaf_list, since each node's aux may carry a list of inode elements.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 time_seq, struct ulist **leafs,
				const u64 *extent_item_pos)
{
	struct ulist *tmp;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ret = find_parent_nodes(trans, fs_info, bytenr,
				time_seq, *leafs, tmp, extent_item_pos);
	ulist_free(tmp);

	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr,
					time_seq, tmp, *roots, NULL);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}

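/*
 * Typical use, as a minimal sketch mirroring iterate_extent_inodes()
 * below: the function allocates and fills *roots, the caller walks it
 * with a ulist iterator and frees it afterwards.
 *
 *	struct ulist *roots = NULL;
 *	struct ulist_iterator uiter;
 *	struct ulist_node *node;
 *
 *	ret = btrfs_find_all_roots(trans, fs_info, bytenr, seq, &roots);
 *	ULIST_ITER_INIT(&uiter);
 *	while (!ret && (node = ulist_next(roots, &uiter)))
 *		;	/* node->val is the objectid of one root */
 *	ulist_free(roots);
 */
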
/*
 * this makes the path point to (inum INODE_ITEM ioff)
 */
int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
		    struct btrfs_path *path)
{
	struct btrfs_key key;
	return btrfs_find_item(fs_root, path, inum, ioff,
			       BTRFS_INODE_ITEM_KEY, &key);
}

static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
			  struct btrfs_path *path,
			  struct btrfs_key *found_key)
{
	return btrfs_find_item(fs_root, path, inum, ioff,
			       BTRFS_INODE_REF_KEY, found_key);
}

int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY);
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next BTRFS_INODE_EXTREF_KEY item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have different
		 * objectid or type then there are no more to be found
		 * in the tree and we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (btrfs_key_type(&found_key) != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}

/*
 * this iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible for
 * providing "size" bytes in "dest". the dest buffer will be filled backwards.
 * finally, the start point of the resulting string is returned. this pointer
 * is within dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		if (eb != eb_in) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			atomic_inc(&eb->refs);
			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}

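/*
 * Worked example of the overflow accounting for illustration: with
 * size == 10 and a resolved path "a/bb/cccc" (9 characters plus the
 * terminating zero), bytes_left ends at 0 and dest itself is returned.
 * Had the path been one byte longer, dest - 1 would be returned,
 * telling the caller that one more byte of buffer would be required.
 */
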
/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		u32 nritems;
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(fs_info->extent_root, path);
			if (ret != 0) {
				if (ret > 0) {
					pr_debug("logical %llu is not within "
						 "any extent\n", logical);
					ret = -ENOENT;
				}
				return ret;
			}
		} else {
			path->slots[0]--;
		}
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (nritems == 0) {
			pr_debug("logical %llu is not within any extent\n",
				 logical);
			return -ENOENT;
		}
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(path->nodes[0], found_key,
				      path->slots[0]);
		if (found_key->type == BTRFS_EXTENT_ITEM_KEY ||
		    found_key->type == BTRFS_METADATA_ITEM_KEY)
			break;
	}

	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->extent_root->leafsize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		pr_debug("logical %llu is not within any extent\n", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	pr_debug("logical %llu is at position %llu within the extent (%llu "
		 "EXTENT_ITEM %llu) flags %#llx size %u\n",
		 logical, logical - found_key->objectid, found_key->objectid,
		 found_key->offset, flags, item_size);

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
			BUG_ON(1);
		return 0;
	}

	return -EIO;
}

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * __get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
				   struct btrfs_extent_item *ei, u32 item_size,
				   struct btrfs_extent_inline_ref **out_eiref,
				   int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			info = (struct btrfs_tree_block_info *)(ei + 1);
			*out_eiref =
				(struct btrfs_extent_inline_ref *)(info + 1);
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((void *)*ptr >= (void *)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see __get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_extent_item *ei, u32 item_size,
			    u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_tree_block_info *info;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
					      &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	info = (struct btrfs_tree_block_info *)(ei + 1);
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
	*out_level = btrfs_tree_block_level(eb, info);

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}

static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
			     u64 root, u64 extent_item_objectid,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		pr_debug("ref for %llu resolved, key (%llu EXTENT_DATA %llu), "
			 "root %llu\n", extent_item_objectid,
			 eie->inum, eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			pr_debug("stopping iteration for %llu due to ret=%d\n",
				 extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  u64 extent_item_objectid, u64 extent_item_pos,
			  int search_commit_root,
			  iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = {};
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	pr_debug("resolving all inodes for extent %llu\n",
		 extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_join_transaction(fs_info->extent_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	}

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots(trans, fs_info, ref_node->val,
					   tree_mod_seq_elem.seq, &roots);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			pr_debug("root %llu references leaf %llu, data list "
				 "%#llx\n", root_node->val, ref_node->val,
				 ref_node->aux);
			ret = iterate_leaf_refs((struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (!search_commit_root) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans, fs_info->extent_root);
	}

	return ret;
}

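/*
 * A minimal callback sketch for illustration (my_iterator is a
 * hypothetical name): the iterator is invoked once per (inode, file
 * offset, root) triple referencing the extent; returning non-zero
 * stops the iteration.
 *
 *	static int my_iterator(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		pr_debug("ino %llu off %llu root %llu\n", inum, offset, root);
 *		return 0;
 *	}
 */
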
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
				    extent_item_pos, search_commit_root,
				    iterate, ctx);

	return ret;
}

typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
			      struct extent_buffer *eb, void *ctx);

static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
				     &found_key);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		extent_buffer_get(eb);
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item = btrfs_item_nr(slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			pr_debug("following ref at offset %u for inode %llu in "
				 "tree %llu\n", cur, found_key.objectid,
				 fs_root->objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
				 struct btrfs_path *path,
				 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		extent_buffer_get(eb);

		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		/*
		 * the path was released above; all further reads must go
		 * through the cloned eb, not through path->nodes[0]
		 */
		item_size = btrfs_item_size_nr(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = iterate(parent, name_len,
				      (unsigned long)&extref->name, eb, ctx);
			if (ret)
				break;

			cur_offset += btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path, iterate_irefs_t *iterate,
			 void *ctx)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}

/*
 * returns 0 if the path could be dumped (may be truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}

struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = vmalloc(alloc_bytes);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

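/*
 * Example for illustration: a caller passing total_bytes == 4096 gets
 * a container with bytes_left == 4096 - sizeof(*data) usable for path
 * data. A value smaller than sizeof(*data) still yields a valid but
 * empty container, with bytes_missing reporting the shortfall.
 */
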
/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct inode_fs_paths).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
				  struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
	if (!ifp) {
		vfree(fspath);	/* fspath was vmalloc'ed, not kmalloc'ed */
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	vfree(ipath->fspath);
	kfree(ipath);
}