/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"
#include "dev-replace.h"
/*
 * This is the implementation for the generic read ahead framework.
 *
 * To trigger a readahead, btrfs_reada_add must be called. It will start
 * a read ahead for the given range [start, end) on tree root. The returned
 * handle can either be used to wait on the readahead to finish
 * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 *
 * The read ahead works as follows:
 * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 * reada_start_machine will then search for extents to prefetch and trigger
 * some reads. When a read finishes for a node, all contained node/leaf
 * pointers that lie in the given range will also be enqueued. The reads will
 * be triggered in sequential order, thus giving a big win over a naive
 * enumeration. It will also make use of multi-device layouts. Each disk
 * will have its own read pointer and all disks will be utilized in parallel.
 * Also, no two disks will read both sides of a mirror simultaneously, as this
 * would waste seeking capacity. Instead both disks will read different parts
 * of the filesystem.
 * Any number of readaheads can be started in parallel. The read order will be
 * determined globally, i.e. 2 parallel readaheads will normally finish faster
 * than the 2 started one after another.
 */
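/*
 * A minimal usage sketch of the interface described above (illustrative
 * only, not part of the original file; the all-zero start key, the
 * all-ones end key and the error handling are assumptions):
 *
 *	struct btrfs_key key_start = { 0 };
 *	struct btrfs_key key_end = { .objectid = (u64)-1, .type = (u8)-1,
 *				     .offset = (u64)-1 };
 *	struct reada_control *rc;
 *
 *	rc = btrfs_reada_add(root, &key_start, &key_end);
 *	if (!IS_ERR(rc))
 *		btrfs_reada_wait(rc);	// or btrfs_reada_detach(rc)
 */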
#define MAX_IN_FLIGHT 6

struct reada_extctl {
	struct list_head	list;
	struct reada_control	*rc;
	u64			generation;
};

struct reada_extent {
	u64			logical;
	struct btrfs_key	top;
	struct list_head	extctl;
	int			refcnt;
	spinlock_t		lock;
	struct reada_zone	*zones[BTRFS_MAX_MIRRORS];
	int			nzones;
	int			scheduled;
};

struct reada_zone {
	u64			start;
	u64			end;
	u64			elems;
	struct list_head	list;
	spinlock_t		lock;
	int			locked;
	struct btrfs_device	*device;
	struct btrfs_device	*devs[BTRFS_MAX_MIRRORS]; /* full list, incl
							   * self */
	int			ndevs;
	struct kref		refcnt;
};

struct reada_machine_work {
	struct btrfs_work	work;
	struct btrfs_fs_info	*fs_info;
};
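/*
 * How these structures relate (a summary of the code below, not a comment
 * from the original file): a reada_control describes one readahead request
 * and its key range; each tree block queued for prefetch is a reada_extent,
 * whose extctl list of reada_extctl entries links it back to the requesting
 * reada_control(s); a reada_zone covers a contiguous region of one device
 * (derived from its block group) and carries the per-disk state used to
 * keep each disk's reads roughly sequential.
 */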
static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
static void reada_control_release(struct kref *kref);
static void reada_zone_release(struct kref *kref);
static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, u64 generation);
/* in case of err, eb might be NULL */
static void __readahead_hook(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re, struct extent_buffer *eb,
			     u64 start, int err)
{
	int level = 0;
	int nritems;
	int i;
	u64 bytenr;
	u64 generation;
	struct list_head list;

	if (eb)
		level = btrfs_header_level(eb);

	spin_lock(&re->lock);
	/*
	 * just take the full list from the extent. afterwards we
	 * don't need the lock anymore
	 */
	list_replace_init(&re->extctl, &list);
	re->scheduled = 0;
	spin_unlock(&re->lock);

	/*
	 * this is the error case, the extent buffer has not been
	 * read correctly. We won't access anything from it and
	 * just cleanup our data structures. Effectively this will
	 * cut the branch below this node from read ahead.
	 */
	if (err)
		goto cleanup;

	/*
	 * FIXME: currently we just set nritems to 0 if this is a leaf,
	 * effectively ignoring the content. In a next step we could
	 * trigger more readahead depending on the content, e.g.
	 * fetch the checksums for the extents in the leaf.
	 */
	if (!level)
		goto cleanup;

	nritems = btrfs_header_nritems(eb);
	generation = btrfs_header_generation(eb);
	for (i = 0; i < nritems; i++) {
		struct reada_extctl *rec;
		u64 n_gen;
		struct btrfs_key key;
		struct btrfs_key next_key;

		btrfs_node_key_to_cpu(eb, &key, i);
		if (i + 1 < nritems)
			btrfs_node_key_to_cpu(eb, &next_key, i + 1);
		else
			next_key = re->top;
		bytenr = btrfs_node_blockptr(eb, i);
		n_gen = btrfs_node_ptr_generation(eb, i);

		list_for_each_entry(rec, &list, list) {
			struct reada_control *rc = rec->rc;

			/*
			 * if the generation doesn't match, just ignore this
			 * extctl. This will probably cut off a branch from
			 * prefetch. Alternatively one could start a new (sub-)
			 * prefetch for this branch, starting again from root.
			 * FIXME: move the generation check out of this loop
			 */
			if (rec->generation != generation) {
				btrfs_debug(fs_info,
					    "generation mismatch for (%llu,%d,%llu) %llu != %llu",
					    key.objectid, key.type, key.offset,
					    rec->generation, generation);
			}
			if (rec->generation == generation &&
			    btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
			    btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
				reada_add_block(rc, bytenr, &next_key, n_gen);
		}
	}

cleanup:
	/*
	 * free extctl records
	 */
	while (!list_empty(&list)) {
		struct reada_control *rc;
		struct reada_extctl *rec;

		rec = list_first_entry(&list, struct reada_extctl, list);
		list_del(&rec->list);
		rc = rec->rc;
		kfree(rec);

		/* pin rc while we drop its "have elements" ref below */
		kref_get(&rc->refcnt);
		if (atomic_dec_and_test(&rc->elems)) {
			kref_put(&rc->refcnt, reada_control_release);
			wake_up(&rc->wait);
		}
		kref_put(&rc->refcnt, reada_control_release);
	}

	reada_extent_put(fs_info, re);	/* one ref for each entry */
}
/*
 * start is passed separately in case eb is NULL, which may be the case with
 * failed I/O
 */
int btree_readahead_hook(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, u64 start, int err)
{
	int ret = 0;
	struct reada_extent *re;

	/* find extent */
	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree,
			       start >> PAGE_CACHE_SHIFT);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);
	if (!re) {
		ret = -1;
		goto start_machine;
	}

	__readahead_hook(fs_info, re, eb, start, err);
	reada_extent_put(fs_info, re);	/* our ref */

start_machine:
	reada_start_machine(fs_info);
	return ret;
}
static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
					  struct btrfs_device *dev, u64 logical,
					  struct btrfs_bio *bbio)
{
	int ret;
	struct reada_zone *zone;
	struct btrfs_block_group_cache *cache = NULL;
	u64 start;
	u64 end;
	int i;

	zone = NULL;
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
				     logical >> PAGE_CACHE_SHIFT, 1);
	if (ret == 1 && logical >= zone->start && logical <= zone->end) {
		kref_get(&zone->refcnt);
		spin_unlock(&fs_info->reada_lock);
		return zone;
	}

	spin_unlock(&fs_info->reada_lock);

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return NULL;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;
	btrfs_put_block_group(cache);

	zone = kzalloc(sizeof(*zone), GFP_NOFS);
	if (!zone)
		return NULL;

	zone->start = start;
	zone->end = end;
	INIT_LIST_HEAD(&zone->list);
	spin_lock_init(&zone->lock);
	zone->locked = 0;
	kref_init(&zone->refcnt);
	zone->elems = 0;
	zone->device = dev; /* our device always sits at index 0 */
	for (i = 0; i < bbio->num_stripes; ++i) {
		/* bounds have already been checked */
		zone->devs[i] = bbio->stripes[i].dev;
	}
	zone->ndevs = bbio->num_stripes;

	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&dev->reada_zones,
				(unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
				zone);

	if (ret == -EEXIST) {
		kfree(zone);
		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
					     logical >> PAGE_CACHE_SHIFT, 1);
		if (ret == 1 && logical >= zone->start && logical <= zone->end)
			kref_get(&zone->refcnt);
		else
			zone = NULL;
	}
	spin_unlock(&fs_info->reada_lock);

	return zone;
}
static struct reada_extent *reada_find_extent(struct btrfs_root *root,
					      u64 logical,
					      struct btrfs_key *top)
{
	int ret;
	struct reada_extent *re = NULL;
	struct reada_extent *re_exist = NULL;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_device *dev;
	struct btrfs_device *prev_dev;
	u32 blocksize;
	u64 length;
	int real_stripes;
	int nzones = 0;
	unsigned long index = logical >> PAGE_CACHE_SHIFT;
	int dev_replace_is_ongoing;
	int have_zone = 0;

	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);

	if (re)
		return re;

	re = kzalloc(sizeof(*re), GFP_NOFS);
	if (!re)
		return NULL;

	blocksize = root->nodesize;
	re->logical = logical;
	re->top = *top;
	INIT_LIST_HEAD(&re->extctl);
	spin_lock_init(&re->lock);
	re->refcnt = 1;

	/*
	 * map block
	 */
	length = blocksize;
	ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
			      &bbio, 0);
	if (ret || !bbio || length < blocksize)
		goto error;

	if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
		btrfs_err(root->fs_info,
			  "readahead: more than %d copies not supported",
			  BTRFS_MAX_MIRRORS);
		goto error;
	}

	real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	for (nzones = 0; nzones < real_stripes; ++nzones) {
		struct reada_zone *zone;

		dev = bbio->stripes[nzones].dev;
		zone = reada_find_zone(fs_info, dev, logical, bbio);
		if (!zone)
			continue;

		re->zones[re->nzones++] = zone;
		spin_lock(&zone->lock);
		if (!zone->elems)
			kref_get(&zone->refcnt);
		++zone->elems;
		spin_unlock(&zone->lock);
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	if (re->nzones == 0) {
		/* not a single zone found, error and out */
		goto error;
	}

	/* insert extent in reada_tree + all per-device trees, all or nothing */
	btrfs_dev_replace_lock(&fs_info->dev_replace);
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
	if (ret == -EEXIST) {
		re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
		BUG_ON(!re_exist);
		re_exist->refcnt++;
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		goto error;
	}
	if (ret) {
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		goto error;
	}
	prev_dev = NULL;
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
			&fs_info->dev_replace);
	for (nzones = 0; nzones < re->nzones; ++nzones) {
		dev = re->zones[nzones]->device;

		if (dev == prev_dev) {
			/*
			 * in case of DUP, just add the first zone. As both
			 * are on the same device, there's nothing to gain
			 * from adding both.
			 * Also, it wouldn't work, as the tree is per device
			 * and adding would fail with EEXIST
			 */
			continue;
		}
		if (!dev->bdev) {
			/*
			 * cannot read ahead on missing device, but for RAID5/6,
			 * REQ_GET_READ_MIRRORS returns 1. So don't skip missing
			 * device for such case.
			 */
			if (nzones > 1)
				continue;
		}
		if (dev_replace_is_ongoing &&
		    dev == fs_info->dev_replace.tgtdev) {
			/*
			 * as this device is selected for reading only as
			 * a last resort, skip it for read ahead.
			 */
			continue;
		}
		prev_dev = dev;
		ret = radix_tree_insert(&dev->reada_extents, index, re);
		if (ret) {
			while (--nzones >= 0) {
				dev = re->zones[nzones]->device;
				BUG_ON(dev == NULL);
				/* ignore whether the entry was inserted */
				radix_tree_delete(&dev->reada_extents, index);
			}
			BUG_ON(fs_info == NULL);
			radix_tree_delete(&fs_info->reada_tree, index);
			spin_unlock(&fs_info->reada_lock);
			btrfs_dev_replace_unlock(&fs_info->dev_replace);
			goto error;
		}
		have_zone = 1;
	}
	spin_unlock(&fs_info->reada_lock);
	btrfs_dev_replace_unlock(&fs_info->dev_replace);

	if (!have_zone)
		goto error;

	btrfs_put_bbio(bbio);
	return re;

error:
	for (nzones = 0; nzones < re->nzones; ++nzones) {
		struct reada_zone *zone;

		zone = re->zones[nzones];
		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	btrfs_put_bbio(bbio);
	kfree(re);
	return re_exist;
}
static void reada_extent_put(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re)
{
	int i;
	unsigned long index = re->logical >> PAGE_CACHE_SHIFT;

	spin_lock(&fs_info->reada_lock);
	if (--re->refcnt) {
		spin_unlock(&fs_info->reada_lock);
		return;
	}

	radix_tree_delete(&fs_info->reada_tree, index);
	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		radix_tree_delete(&zone->device->reada_extents, index);
	}

	spin_unlock(&fs_info->reada_lock);

	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/* no fs_info->reada_lock needed, as this can't be
			 * the last ref */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}

	kfree(re);
}
static void reada_zone_release(struct kref *kref)
{
	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);

	radix_tree_delete(&zone->device->reada_zones,
			  zone->end >> PAGE_CACHE_SHIFT);

	kfree(zone);
}
static void reada_control_release(struct kref *kref)
{
	struct reada_control *rc = container_of(kref, struct reada_control,
						refcnt);

	kfree(rc);
}
static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, u64 generation)
{
	struct btrfs_root *root = rc->root;
	struct reada_extent *re;
	struct reada_extctl *rec;

	re = reada_find_extent(root, logical, top); /* takes one ref */
	if (!re)
		return -1;

	rec = kzalloc(sizeof(*rec), GFP_NOFS);
	if (!rec) {
		reada_extent_put(root->fs_info, re);
		return -ENOMEM;
	}

	rec->rc = rc;
	rec->generation = generation;
	atomic_inc(&rc->elems);

	spin_lock(&re->lock);
	list_add_tail(&rec->list, &re->extctl);
	spin_unlock(&re->lock);

	/* leave the ref on the extent */

	return 0;
}
/*
 * called with fs_info->reada_lock held
 */
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
	int i;
	unsigned long index = zone->end >> PAGE_CACHE_SHIFT;

	for (i = 0; i < zone->ndevs; ++i) {
		struct reada_zone *peer;
		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
		if (peer && peer->device != zone->device)
			peer->locked = lock;
	}
}
/*
 * called with fs_info->reada_lock held
 */
static int reada_pick_zone(struct btrfs_device *dev)
{
	struct reada_zone *top_zone = NULL;
	struct reada_zone *top_locked_zone = NULL;
	u64 top_elems = 0;
	u64 top_locked_elems = 0;
	unsigned long index = 0;
	int ret;

	if (dev->reada_curr_zone) {
		reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
		kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
		dev->reada_curr_zone = NULL;
	}
	/* pick the zone with the most elements */
	while (1) {
		struct reada_zone *zone;

		ret = radix_tree_gang_lookup(&dev->reada_zones,
					     (void **)&zone, index, 1);
		if (ret == 0)
			break;
		index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
		if (zone->locked) {
			if (zone->elems > top_locked_elems) {
				top_locked_elems = zone->elems;
				top_locked_zone = zone;
			}
		} else {
			if (zone->elems > top_elems) {
				top_elems = zone->elems;
				top_zone = zone;
			}
		}
	}
	if (top_zone)
		dev->reada_curr_zone = top_zone;
	else if (top_locked_zone)
		dev->reada_curr_zone = top_locked_zone;
	else
		return 0;

	dev->reada_next = dev->reada_curr_zone->start;
	kref_get(&dev->reada_curr_zone->refcnt);
	reada_peer_zones_set_lock(dev->reada_curr_zone, 1);

	return 1;
}
static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
				   struct btrfs_device *dev)
{
	struct reada_extent *re = NULL;
	int mirror_num = 0;
	struct extent_buffer *eb = NULL;
	u64 logical;
	int ret;
	int i;

	spin_lock(&fs_info->reada_lock);
	if (dev->reada_curr_zone == NULL) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
	}
	/*
	 * FIXME currently we issue the reads one extent at a time. If we have
	 * a contiguous block of extents, we could also coagulate them or use
	 * plugging to speed things up
	 */
	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
				     dev->reada_next >> PAGE_CACHE_SHIFT, 1);
	if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
		re = NULL;
		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
					dev->reada_next >> PAGE_CACHE_SHIFT, 1);
	}
	if (ret == 0) {
		spin_unlock(&fs_info->reada_lock);
		return 0;
	}
	dev->reada_next = re->logical + fs_info->tree_root->nodesize;
	re->refcnt++;

	spin_unlock(&fs_info->reada_lock);

	spin_lock(&re->lock);
	if (re->scheduled || list_empty(&re->extctl)) {
		spin_unlock(&re->lock);
		reada_extent_put(fs_info, re);
		return 0;
	}
	re->scheduled = 1;
	spin_unlock(&re->lock);

	/*
	 * find mirror num
	 */
	for (i = 0; i < re->nzones; ++i) {
		if (re->zones[i]->device == dev) {
			mirror_num = i + 1;
			break;
		}
	}
	logical = re->logical;

	atomic_inc(&dev->reada_in_flight);
	ret = reada_tree_block_flagged(fs_info->extent_root, logical,
				       mirror_num, &eb);
	if (ret)
		__readahead_hook(fs_info, re, NULL, logical, ret);
	else if (eb)
		__readahead_hook(fs_info, re, eb, eb->start, ret);

	if (eb)
		free_extent_buffer(eb);

	atomic_dec(&dev->reada_in_flight);
	reada_extent_put(fs_info, re);

	return 1;
}
static void reada_start_machine_worker(struct btrfs_work *work)
{
	struct reada_machine_work *rmw;
	struct btrfs_fs_info *fs_info;
	int old_ioprio;

	rmw = container_of(work, struct reada_machine_work, work);
	fs_info = rmw->fs_info;
	kfree(rmw);

	old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
				       task_nice_ioprio(current));
	set_task_ioprio(current, BTRFS_IOPRIO_READA);
	__reada_start_machine(fs_info);
	set_task_ioprio(current, old_ioprio);

	atomic_dec(&fs_info->reada_works_cnt);
}
static void __reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 enqueued;
	u64 total = 0;
	int i;

	do {
		enqueued = 0;
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (atomic_read(&device->reada_in_flight) <
			    MAX_IN_FLIGHT)
				enqueued += reada_start_machine_dev(fs_info,
								    device);
		}
		total += enqueued;
	} while (enqueued && total < 10000);

	if (enqueued == 0)
		return;

	/*
	 * If everything is already in the cache, this is effectively single
	 * threaded. To a) not hold the caller for too long and b) to utilize
	 * more cores, we broke the loop above after 10000 iterations and now
	 * enqueue to workers to finish it. This will distribute the load to
	 * the cores.
	 */
	for (i = 0; i < 2; ++i) {
		reada_start_machine(fs_info);
		if (atomic_read(&fs_info->reada_works_cnt) >
		    BTRFS_MAX_MIRRORS * 2)
			break;
	}
}
static void reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct reada_machine_work *rmw;

	rmw = kzalloc(sizeof(*rmw), GFP_NOFS);
	if (!rmw) {
		/* FIXME we cannot handle this properly right now */
		BUG();
	}
	btrfs_init_work(&rmw->work, btrfs_readahead_helper,
			reada_start_machine_worker, NULL, NULL);
	rmw->fs_info = fs_info;

	btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
	atomic_inc(&fs_info->reada_works_cnt);
}
#ifdef DEBUG
static void dump_devs(struct btrfs_fs_info *fs_info, int all)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	unsigned long index;
	int ret;
	int i;
	int j;
	int cnt;

	spin_lock(&fs_info->reada_lock);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		printk(KERN_DEBUG "dev %lld has %d in flight\n", device->devid,
			atomic_read(&device->reada_in_flight));
		index = 0;
		while (1) {
			struct reada_zone *zone;
			ret = radix_tree_gang_lookup(&device->reada_zones,
						     (void **)&zone, index, 1);
			if (ret == 0)
				break;
			printk(KERN_DEBUG "  zone %llu-%llu elems %llu locked "
				"%d devs", zone->start, zone->end, zone->elems,
				zone->locked);
			for (j = 0; j < zone->ndevs; ++j) {
				printk(KERN_CONT " %lld",
					zone->devs[j]->devid);
			}
			if (device->reada_curr_zone == zone)
				printk(KERN_CONT " curr off %llu",
					device->reada_next - zone->start);
			printk(KERN_CONT "\n");
			index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
		}
		cnt = 0;
		index = 0;
		while (all) {
			struct reada_extent *re = NULL;

			ret = radix_tree_gang_lookup(&device->reada_extents,
						     (void **)&re, index, 1);
			if (ret == 0)
				break;
			printk(KERN_DEBUG
				"  re: logical %llu size %u empty %d scheduled %d",
				re->logical, fs_info->tree_root->nodesize,
				list_empty(&re->extctl), re->scheduled);

			for (i = 0; i < re->nzones; ++i) {
				printk(KERN_CONT " zone %llu-%llu devs",
					re->zones[i]->start,
					re->zones[i]->end);
				for (j = 0; j < re->zones[i]->ndevs; ++j) {
					printk(KERN_CONT " %lld",
						re->zones[i]->devs[j]->devid);
				}
			}
			printk(KERN_CONT "\n");
			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
			if (++cnt > 15)
				break;
		}
	}

	index = 0;
	cnt = 0;
	while (all) {
		struct reada_extent *re = NULL;

		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
					     index, 1);
		if (ret == 0)
			break;
		if (!re->scheduled) {
			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
			continue;
		}
		printk(KERN_DEBUG
			"re: logical %llu size %u list empty %d scheduled %d",
			re->logical, fs_info->tree_root->nodesize,
			list_empty(&re->extctl), re->scheduled);
		for (i = 0; i < re->nzones; ++i) {
			printk(KERN_CONT " zone %llu-%llu devs",
				re->zones[i]->start,
				re->zones[i]->end);
			for (j = 0; j < re->zones[i]->ndevs; ++j) {
				printk(KERN_CONT " %lld",
				       re->zones[i]->devs[j]->devid);
			}
		}
		printk(KERN_CONT "\n");
		index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
	}
	spin_unlock(&fs_info->reada_lock);
}
#endif
/*
 * interface
 */
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
			struct btrfs_key *key_start, struct btrfs_key *key_end)
{
	struct reada_control *rc;
	u64 start;
	u64 generation;
	int ret;
	struct extent_buffer *node;
	static struct btrfs_key max_key = {
		.objectid = (u64)-1,
		.type = (u8)-1,
		.offset = (u64)-1
	};

	rc = kzalloc(sizeof(*rc), GFP_NOFS);
	if (!rc)
		return ERR_PTR(-ENOMEM);

	rc->root = root;
	rc->key_start = *key_start;
	rc->key_end = *key_end;
	atomic_set(&rc->elems, 0);
	init_waitqueue_head(&rc->wait);
	kref_init(&rc->refcnt);
	kref_get(&rc->refcnt); /* one ref for having elements */

	node = btrfs_root_node(root);
	start = node->start;
	generation = btrfs_header_generation(node);
	free_extent_buffer(node);

	ret = reada_add_block(rc, start, &max_key, generation);
	if (ret) {
		kfree(rc);
		return ERR_PTR(ret);
	}

	reada_start_machine(root->fs_info);

	return rc;
}
#ifdef DEBUG
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;
	struct btrfs_fs_info *fs_info = rc->root->fs_info;

	while (atomic_read(&rc->elems)) {
		if (!atomic_read(&fs_info->reada_works_cnt))
			reada_start_machine(fs_info);
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   5 * HZ);
		dump_devs(rc->root->fs_info,
			  atomic_read(&rc->elems) < 10 ? 1 : 0);
	}

	dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#else
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;
	struct btrfs_fs_info *fs_info = rc->root->fs_info;

	while (atomic_read(&rc->elems)) {
		if (!atomic_read(&fs_info->reada_works_cnt))
			reada_start_machine(fs_info);
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   (HZ + 9) / 10);
	}

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#endif
void btrfs_reada_detach(void *handle)
{
	struct reada_control *rc = handle;

	kref_put(&rc->refcnt, reada_control_release);
}