btrfs: reada: avoid undone reada extents in btrfs_reada_wait
fs/btrfs/reada.c
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"
#include "dev-replace.h"

#undef DEBUG
/*
 * This is the implementation for the generic read ahead framework.
 *
 * To trigger a readahead, btrfs_reada_add must be called. It will start
 * a read ahead for the given range [start, end) on tree root. The returned
 * handle can either be used to wait on the readahead to finish
 * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 *
 * The read ahead works as follows:
 * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 * reada_start_machine will then search for extents to prefetch and trigger
 * some reads. When a read finishes for a node, all contained node/leaf
 * pointers that lie in the given range will also be enqueued. The reads will
 * be triggered in sequential order, thus giving a big win over a naive
 * enumeration. It will also make use of multi-device layouts. Each disk
 * will have its own read pointer and all disks will be utilized in parallel.
 * Also, no two disks will read both sides of a mirror simultaneously, as this
 * would waste seeking capacity. Instead both disks will read different parts
 * of the filesystem.
 * Any number of readaheads can be started in parallel. The read order will be
 * determined globally, i.e. 2 parallel readaheads will normally finish faster
 * than the 2 started one after another.
 */
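
/*
 * Illustrative usage sketch (not taken from a specific caller, though scrub
 * is one in-tree user of this interface). A caller that wants to prefetch an
 * entire tree could do roughly the following, given a referenced @root:
 *
 *	struct btrfs_key start = { .objectid = 0, .type = 0, .offset = 0 };
 *	struct btrfs_key end = {
 *		.objectid = (u64)-1, .type = (u8)-1, .offset = (u64)-1
 *	};
 *	struct reada_control *rc = btrfs_reada_add(root, &start, &end);
 *
 *	if (!IS_ERR(rc))
 *		btrfs_reada_wait(rc);
 *
 * btrfs_reada_detach(rc) could be used instead of the wait to let the
 * readahead continue in the background.
 */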

#define MAX_IN_FLIGHT 6

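/*
 * A reada_extctl links one pending extent to the reada_control that requested
 * it. A reada_extent describes a single tree block together with the zones
 * (mirrors) it can be read from. A reada_zone is the portion of one block
 * group as seen from a single device, with the full list of devices that
 * mirror it.
 */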
struct reada_extctl {
        struct list_head list;
        struct reada_control *rc;
        u64 generation;
};

struct reada_extent {
        u64 logical;
        struct btrfs_key top;
        int err;
        struct list_head extctl;
        int refcnt;
        spinlock_t lock;
        struct reada_zone *zones[BTRFS_MAX_MIRRORS];
        int nzones;
        int scheduled;
};

struct reada_zone {
        u64 start;
        u64 end;
        u64 elems;
        struct list_head list;
        spinlock_t lock;
        int locked;
        struct btrfs_device *device;
        struct btrfs_device *devs[BTRFS_MAX_MIRRORS]; /* full list, incl self */
        int ndevs;
        struct kref refcnt;
};

struct reada_machine_work {
        struct btrfs_work work;
        struct btrfs_fs_info *fs_info;
};

static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
static void reada_control_release(struct kref *kref);
static void reada_zone_release(struct kref *kref);
static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);

static int reada_add_block(struct reada_control *rc, u64 logical,
                           struct btrfs_key *top, u64 generation);

/*
 * Process one finished (or failed) read: hand the result to all queued extctl
 * records and, for an inner node, enqueue every child pointer that falls into
 * the requested key range. Recurses via reada_add_block. In case of an error,
 * eb might be NULL.
 */
static void __readahead_hook(struct btrfs_fs_info *fs_info,
                             struct reada_extent *re, struct extent_buffer *eb,
                             u64 start, int err)
{
        int level = 0;
        int nritems;
        int i;
        u64 bytenr;
        u64 generation;
        struct list_head list;

        if (eb)
                level = btrfs_header_level(eb);

        spin_lock(&re->lock);
        /*
         * just take the full list from the extent. afterwards we
         * don't need the lock anymore
         */
        list_replace_init(&re->extctl, &list);
        re->scheduled = 0;
        spin_unlock(&re->lock);

        /*
         * this is the error case, the extent buffer has not been
         * read correctly. We won't access anything from it and
         * just cleanup our data structures. Effectively this will
         * cut the branch below this node from read ahead.
         */
        if (err)
                goto cleanup;

        /*
         * FIXME: currently we just set nritems to 0 if this is a leaf,
         * effectively ignoring the content. In a next step we could
         * trigger more readahead depending on the content, e.g.
         * fetch the checksums for the extents in the leaf.
         */
        if (!level)
                goto cleanup;

        nritems = btrfs_header_nritems(eb);
        generation = btrfs_header_generation(eb);
        for (i = 0; i < nritems; i++) {
                struct reada_extctl *rec;
                u64 n_gen;
                struct btrfs_key key;
                struct btrfs_key next_key;

                btrfs_node_key_to_cpu(eb, &key, i);
                if (i + 1 < nritems)
                        btrfs_node_key_to_cpu(eb, &next_key, i + 1);
                else
                        next_key = re->top;
                bytenr = btrfs_node_blockptr(eb, i);
                n_gen = btrfs_node_ptr_generation(eb, i);

                list_for_each_entry(rec, &list, list) {
                        struct reada_control *rc = rec->rc;

                        /*
                         * if the generation doesn't match, just ignore this
                         * extctl. This will probably cut off a branch from
                         * prefetch. Alternatively one could start a new (sub-)
                         * prefetch for this branch, starting again from root.
                         * FIXME: move the generation check out of this loop
                         */
#ifdef DEBUG
                        if (rec->generation != generation) {
                                btrfs_debug(fs_info,
                                            "generation mismatch for (%llu,%d,%llu) %llu != %llu",
                                            key.objectid, key.type, key.offset,
                                            rec->generation, generation);
                        }
#endif
                        if (rec->generation == generation &&
                            btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
                            btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
                                reada_add_block(rc, bytenr, &next_key, n_gen);
                }
        }

cleanup:
        /*
         * free extctl records
         */
        while (!list_empty(&list)) {
                struct reada_control *rc;
                struct reada_extctl *rec;

                rec = list_first_entry(&list, struct reada_extctl, list);
                list_del(&rec->list);
                rc = rec->rc;
                kfree(rec);

                kref_get(&rc->refcnt);
                if (atomic_dec_and_test(&rc->elems)) {
                        kref_put(&rc->refcnt, reada_control_release);
                        wake_up(&rc->wait);
                }
                kref_put(&rc->refcnt, reada_control_release);

                reada_extent_put(fs_info, re); /* one ref for each entry */
        }

        return;
}

/*
 * start is passed separately in case eb is NULL, which may be the case with
 * failed I/O
 */
int btree_readahead_hook(struct btrfs_fs_info *fs_info,
                         struct extent_buffer *eb, u64 start, int err)
{
        int ret = 0;
        struct reada_extent *re;

        /* find extent */
        spin_lock(&fs_info->reada_lock);
        re = radix_tree_lookup(&fs_info->reada_tree,
                               start >> PAGE_CACHE_SHIFT);
        if (re)
                re->refcnt++;
        spin_unlock(&fs_info->reada_lock);
        if (!re) {
                ret = -1;
                goto start_machine;
        }

        __readahead_hook(fs_info, re, eb, start, err);
        reada_extent_put(fs_info, re); /* our ref */

start_machine:
        reada_start_machine(fs_info);
        return ret;
}

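/*
 * Find the zone on @dev that covers @logical, or create one spanning the
 * enclosing block group and register it in the device's reada_zones tree.
 * Returns the zone with a reference held, or NULL on failure.
 */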
static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
                                          struct btrfs_device *dev, u64 logical,
                                          struct btrfs_bio *bbio)
{
        int ret;
        struct reada_zone *zone;
        struct btrfs_block_group_cache *cache = NULL;
        u64 start;
        u64 end;
        int i;

        zone = NULL;
        spin_lock(&fs_info->reada_lock);
        ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
                                     logical >> PAGE_CACHE_SHIFT, 1);
        if (ret == 1 && logical >= zone->start && logical <= zone->end) {
                kref_get(&zone->refcnt);
                spin_unlock(&fs_info->reada_lock);
                return zone;
        }

        spin_unlock(&fs_info->reada_lock);

        cache = btrfs_lookup_block_group(fs_info, logical);
        if (!cache)
                return NULL;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;
        btrfs_put_block_group(cache);

        zone = kzalloc(sizeof(*zone), GFP_NOFS);
        if (!zone)
                return NULL;

        zone->start = start;
        zone->end = end;
        INIT_LIST_HEAD(&zone->list);
        spin_lock_init(&zone->lock);
        zone->locked = 0;
        kref_init(&zone->refcnt);
        zone->elems = 0;
        zone->device = dev; /* our device always sits at index 0 */
        for (i = 0; i < bbio->num_stripes; ++i) {
                /* bounds have already been checked */
                zone->devs[i] = bbio->stripes[i].dev;
        }
        zone->ndevs = bbio->num_stripes;

        spin_lock(&fs_info->reada_lock);
        ret = radix_tree_insert(&dev->reada_zones,
                                (unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
                                zone);

        if (ret == -EEXIST) {
                kfree(zone);
                ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
                                             logical >> PAGE_CACHE_SHIFT, 1);
                if (ret == 1 && logical >= zone->start && logical <= zone->end)
                        kref_get(&zone->refcnt);
                else
                        zone = NULL;
        }
        spin_unlock(&fs_info->reada_lock);

        return zone;
}

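/*
 * Find or create the reada_extent for @logical: map the block, collect one
 * zone per mirror, and insert the extent into fs_info->reada_tree as well as
 * into every eligible device's reada_extents tree. Returns the extent with
 * one reference held, or NULL on failure.
 */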
static struct reada_extent *reada_find_extent(struct btrfs_root *root,
                                              u64 logical,
                                              struct btrfs_key *top)
{
        int ret;
        struct reada_extent *re = NULL;
        struct reada_extent *re_exist = NULL;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_bio *bbio = NULL;
        struct btrfs_device *dev;
        struct btrfs_device *prev_dev;
        u32 blocksize;
        u64 length;
        int real_stripes;
        int nzones = 0;
        unsigned long index = logical >> PAGE_CACHE_SHIFT;
        int dev_replace_is_ongoing;
        int have_zone = 0;

        spin_lock(&fs_info->reada_lock);
        re = radix_tree_lookup(&fs_info->reada_tree, index);
        if (re)
                re->refcnt++;
        spin_unlock(&fs_info->reada_lock);

        if (re)
                return re;

        re = kzalloc(sizeof(*re), GFP_NOFS);
        if (!re)
                return NULL;

        blocksize = root->nodesize;
        re->logical = logical;
        re->top = *top;
        INIT_LIST_HEAD(&re->extctl);
        spin_lock_init(&re->lock);
        re->refcnt = 1;

        /*
         * map block
         */
        length = blocksize;
        ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
                              &bbio, 0);
        if (ret || !bbio || length < blocksize)
                goto error;

        if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
                btrfs_err(root->fs_info,
                          "readahead: more than %d copies not supported",
                          BTRFS_MAX_MIRRORS);
                goto error;
        }

        real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
        for (nzones = 0; nzones < real_stripes; ++nzones) {
                struct reada_zone *zone;

                dev = bbio->stripes[nzones].dev;
                zone = reada_find_zone(fs_info, dev, logical, bbio);
                if (!zone)
                        continue;

                re->zones[re->nzones++] = zone;
                spin_lock(&zone->lock);
                if (!zone->elems)
                        kref_get(&zone->refcnt);
                ++zone->elems;
                spin_unlock(&zone->lock);
                spin_lock(&fs_info->reada_lock);
                kref_put(&zone->refcnt, reada_zone_release);
                spin_unlock(&fs_info->reada_lock);
        }
        if (re->nzones == 0) {
                /* not a single zone found, error and out */
                goto error;
        }

        /* insert extent in reada_tree + all per-device trees, all or nothing */
        btrfs_dev_replace_lock(&fs_info->dev_replace);
        spin_lock(&fs_info->reada_lock);
        ret = radix_tree_insert(&fs_info->reada_tree, index, re);
        if (ret == -EEXIST) {
                re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
                BUG_ON(!re_exist);
                re_exist->refcnt++;
                spin_unlock(&fs_info->reada_lock);
                btrfs_dev_replace_unlock(&fs_info->dev_replace);
                goto error;
        }
        if (ret) {
                spin_unlock(&fs_info->reada_lock);
                btrfs_dev_replace_unlock(&fs_info->dev_replace);
                goto error;
        }
        prev_dev = NULL;
        dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
                        &fs_info->dev_replace);
        for (nzones = 0; nzones < re->nzones; ++nzones) {
                dev = re->zones[nzones]->device;

                if (dev == prev_dev) {
                        /*
                         * in case of DUP, just add the first zone. As both
                         * are on the same device, there's nothing to gain
                         * from adding both.
                         * Also, it wouldn't work, as the tree is per device
                         * and adding would fail with EEXIST
                         */
                        continue;
                }
                if (!dev->bdev) {
                        /*
                         * we cannot read ahead from a missing device, but
                         * for RAID5/6 REQ_GET_READ_MIRRORS returns only one
                         * stripe, so don't skip the missing device in that
                         * case.
                         */
                        if (nzones > 1)
                                continue;
                }
                if (dev_replace_is_ongoing &&
                    dev == fs_info->dev_replace.tgtdev) {
                        /*
                         * as this device is selected for reading only as
                         * a last resort, skip it for read ahead.
                         */
                        continue;
                }
                prev_dev = dev;
                ret = radix_tree_insert(&dev->reada_extents, index, re);
                if (ret) {
                        while (--nzones >= 0) {
                                dev = re->zones[nzones]->device;
                                BUG_ON(dev == NULL);
                                /* ignore whether the entry was inserted */
                                radix_tree_delete(&dev->reada_extents, index);
                        }
                        BUG_ON(fs_info == NULL);
                        radix_tree_delete(&fs_info->reada_tree, index);
                        spin_unlock(&fs_info->reada_lock);
                        btrfs_dev_replace_unlock(&fs_info->dev_replace);
                        goto error;
                }
                have_zone = 1;
        }
        spin_unlock(&fs_info->reada_lock);
        btrfs_dev_replace_unlock(&fs_info->dev_replace);

        if (!have_zone)
                goto error;

        btrfs_put_bbio(bbio);
        return re;

error:
        for (nzones = 0; nzones < re->nzones; ++nzones) {
                struct reada_zone *zone;

                zone = re->zones[nzones];
                kref_get(&zone->refcnt);
                spin_lock(&zone->lock);
                --zone->elems;
                if (zone->elems == 0) {
                        /*
                         * no fs_info->reada_lock needed, as this can't be
                         * the last ref
                         */
                        kref_put(&zone->refcnt, reada_zone_release);
                }
                spin_unlock(&zone->lock);

                spin_lock(&fs_info->reada_lock);
                kref_put(&zone->refcnt, reada_zone_release);
                spin_unlock(&fs_info->reada_lock);
        }
        btrfs_put_bbio(bbio);
        kfree(re);
        return re_exist;
}

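/*
 * Drop one reference on @re. When the last reference goes away, unlink the
 * extent from the global and per-device radix trees and release the element
 * counts on all of its zones.
 */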
static void reada_extent_put(struct btrfs_fs_info *fs_info,
                             struct reada_extent *re)
{
        int i;
        unsigned long index = re->logical >> PAGE_CACHE_SHIFT;

        spin_lock(&fs_info->reada_lock);
        if (--re->refcnt) {
                spin_unlock(&fs_info->reada_lock);
                return;
        }

        radix_tree_delete(&fs_info->reada_tree, index);
        for (i = 0; i < re->nzones; ++i) {
                struct reada_zone *zone = re->zones[i];

                radix_tree_delete(&zone->device->reada_extents, index);
        }

        spin_unlock(&fs_info->reada_lock);

        for (i = 0; i < re->nzones; ++i) {
                struct reada_zone *zone = re->zones[i];

                kref_get(&zone->refcnt);
                spin_lock(&zone->lock);
                --zone->elems;
                if (zone->elems == 0) {
                        /*
                         * no fs_info->reada_lock needed, as this can't be
                         * the last ref
                         */
                        kref_put(&zone->refcnt, reada_zone_release);
                }
                spin_unlock(&zone->lock);

                spin_lock(&fs_info->reada_lock);
                kref_put(&zone->refcnt, reada_zone_release);
                spin_unlock(&fs_info->reada_lock);
        }

        kfree(re);
}

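/*
 * kref release callback. Every kref_put that can drop the last reference is
 * done with fs_info->reada_lock held (see the "can't be the last ref"
 * comments above), so the radix tree deletion below is serialized.
 */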
static void reada_zone_release(struct kref *kref)
{
        struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);

        radix_tree_delete(&zone->device->reada_zones,
                          zone->end >> PAGE_CACHE_SHIFT);

        kfree(zone);
}

static void reada_control_release(struct kref *kref)
{
        struct reada_control *rc = container_of(kref, struct reada_control,
                                                refcnt);

        kfree(rc);
}

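/*
 * Queue the block at @logical for readahead on behalf of @rc: find or create
 * its reada_extent and attach an extctl record carrying the expected
 * generation. The reference taken by reada_find_extent stays on the extent
 * until the extctl record is consumed in __readahead_hook.
 */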
static int reada_add_block(struct reada_control *rc, u64 logical,
                           struct btrfs_key *top, u64 generation)
{
        struct btrfs_root *root = rc->root;
        struct reada_extent *re;
        struct reada_extctl *rec;

        re = reada_find_extent(root, logical, top); /* takes one ref */
        if (!re)
                return -1;

        rec = kzalloc(sizeof(*rec), GFP_NOFS);
        if (!rec) {
                reada_extent_put(root->fs_info, re);
                return -ENOMEM;
        }

        rec->rc = rc;
        rec->generation = generation;
        atomic_inc(&rc->elems);

        spin_lock(&re->lock);
        list_add_tail(&rec->list, &re->extctl);
        spin_unlock(&re->lock);

        /* leave the ref on the extent */

        return 0;
}

/*
 * called with fs_info->reada_lock held
 */
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
        int i;
        unsigned long index = zone->end >> PAGE_CACHE_SHIFT;

        for (i = 0; i < zone->ndevs; ++i) {
                struct reada_zone *peer;

                peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
                if (peer && peer->device != zone->device)
                        peer->locked = lock;
        }
}

/*
 * called with fs_info->reada_lock held
 */
static int reada_pick_zone(struct btrfs_device *dev)
{
        struct reada_zone *top_zone = NULL;
        struct reada_zone *top_locked_zone = NULL;
        u64 top_elems = 0;
        u64 top_locked_elems = 0;
        unsigned long index = 0;
        int ret;

        if (dev->reada_curr_zone) {
                reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
                kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
                dev->reada_curr_zone = NULL;
        }
        /* pick the zone with the most elements */
        while (1) {
                struct reada_zone *zone;

                ret = radix_tree_gang_lookup(&dev->reada_zones,
                                             (void **)&zone, index, 1);
                if (ret == 0)
                        break;
                index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
                if (zone->locked) {
                        if (zone->elems > top_locked_elems) {
                                top_locked_elems = zone->elems;
                                top_locked_zone = zone;
                        }
                } else {
                        if (zone->elems > top_elems) {
                                top_elems = zone->elems;
                                top_zone = zone;
                        }
                }
        }
        if (top_zone)
                dev->reada_curr_zone = top_zone;
        else if (top_locked_zone)
                dev->reada_curr_zone = top_locked_zone;
        else
                return 0;

        dev->reada_next = dev->reada_curr_zone->start;
        kref_get(&dev->reada_curr_zone->refcnt);
        reada_peer_zones_set_lock(dev->reada_curr_zone, 1);

        return 1;
}

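/*
 * Issue at most one readahead on @dev: pick or advance the current zone, take
 * the next extent at or behind dev->reada_next, and trigger the read unless
 * the extent is already scheduled or has no waiters left. Returns 1 if a read
 * was started, 0 otherwise.
 */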
static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
                                   struct btrfs_device *dev)
{
        struct reada_extent *re = NULL;
        int mirror_num = 0;
        struct extent_buffer *eb = NULL;
        u64 logical;
        int ret;
        int i;

        spin_lock(&fs_info->reada_lock);
        if (dev->reada_curr_zone == NULL) {
                ret = reada_pick_zone(dev);
                if (!ret) {
                        spin_unlock(&fs_info->reada_lock);
                        return 0;
                }
        }
        /*
         * FIXME: currently we issue the reads one extent at a time. If we have
         * a contiguous block of extents, we could also coalesce them or use
         * plugging to speed things up
         */
        ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
                                     dev->reada_next >> PAGE_CACHE_SHIFT, 1);
        if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
                ret = reada_pick_zone(dev);
                if (!ret) {
                        spin_unlock(&fs_info->reada_lock);
                        return 0;
                }
                re = NULL;
                ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
                                             dev->reada_next >> PAGE_CACHE_SHIFT, 1);
        }
        if (ret == 0) {
                spin_unlock(&fs_info->reada_lock);
                return 0;
        }
        dev->reada_next = re->logical + fs_info->tree_root->nodesize;
        re->refcnt++;

        spin_unlock(&fs_info->reada_lock);

        spin_lock(&re->lock);
        if (re->scheduled || list_empty(&re->extctl)) {
                spin_unlock(&re->lock);
                reada_extent_put(fs_info, re);
                return 0;
        }
        re->scheduled = 1;
        spin_unlock(&re->lock);

        /*
         * find mirror num
         */
        for (i = 0; i < re->nzones; ++i) {
                if (re->zones[i]->device == dev) {
                        mirror_num = i + 1;
                        break;
                }
        }
        logical = re->logical;

        atomic_inc(&dev->reada_in_flight);
        ret = reada_tree_block_flagged(fs_info->extent_root, logical,
                                       mirror_num, &eb);
        if (ret)
                __readahead_hook(fs_info, re, NULL, logical, ret);
        else if (eb)
                __readahead_hook(fs_info, re, eb, eb->start, ret);

        if (eb)
                free_extent_buffer(eb);

        atomic_dec(&dev->reada_in_flight);
        reada_extent_put(fs_info, re);

        return 1;
}

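/*
 * Worker entry point: run the state machine with readahead I/O priority,
 * restore the task's previous priority, then drop the pending-works counter.
 */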
static void reada_start_machine_worker(struct btrfs_work *work)
{
        struct reada_machine_work *rmw;
        struct btrfs_fs_info *fs_info;
        int old_ioprio;

        rmw = container_of(work, struct reada_machine_work, work);
        fs_info = rmw->fs_info;

        kfree(rmw);

        old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
                                       task_nice_ioprio(current));
        set_task_ioprio(current, BTRFS_IOPRIO_READA);
        __reada_start_machine(fs_info);
        set_task_ioprio(current, old_ioprio);

        atomic_dec(&fs_info->reada_works_cnt);
}

static void __reada_start_machine(struct btrfs_fs_info *fs_info)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        u64 enqueued;
        u64 total = 0;
        int i;

        do {
                enqueued = 0;
                list_for_each_entry(device, &fs_devices->devices, dev_list) {
                        if (atomic_read(&device->reada_in_flight) <
                            MAX_IN_FLIGHT)
                                enqueued += reada_start_machine_dev(fs_info,
                                                                    device);
                }
                total += enqueued;
        } while (enqueued && total < 10000);

        if (enqueued == 0)
                return;

        /*
         * If everything is already in the cache, this is effectively single
         * threaded. To a) not hold the caller for too long and b) to utilize
         * more cores, we broke the loop above after 10000 iterations and now
         * enqueue to workers to finish it. This will distribute the load to
         * the cores.
         */
        for (i = 0; i < 2; ++i) {
                reada_start_machine(fs_info);
                if (atomic_read(&fs_info->reada_works_cnt) >
                    BTRFS_MAX_MIRRORS * 2)
                        break;
        }
}

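/*
 * Kick the state machine asynchronously by queueing a reada_machine_work
 * item on the dedicated readahead workqueue.
 */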
static void reada_start_machine(struct btrfs_fs_info *fs_info)
{
        struct reada_machine_work *rmw;

        rmw = kzalloc(sizeof(*rmw), GFP_NOFS);
        if (!rmw) {
                /* FIXME we cannot handle this properly right now */
                BUG();
        }
        btrfs_init_work(&rmw->work, btrfs_readahead_helper,
                        reada_start_machine_worker, NULL, NULL);
        rmw->fs_info = fs_info;

        btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
        atomic_inc(&fs_info->reada_works_cnt);
}

#ifdef DEBUG
static void dump_devs(struct btrfs_fs_info *fs_info, int all)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        unsigned long index;
        int ret;
        int i;
        int j;
        int cnt;

        spin_lock(&fs_info->reada_lock);
        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                printk(KERN_DEBUG "dev %lld has %d in flight\n", device->devid,
                       atomic_read(&device->reada_in_flight));
                index = 0;
                while (1) {
                        struct reada_zone *zone;

                        ret = radix_tree_gang_lookup(&device->reada_zones,
                                                     (void **)&zone, index, 1);
                        if (ret == 0)
                                break;
                        printk(KERN_DEBUG "  zone %llu-%llu elems %llu locked %d devs",
                               zone->start, zone->end, zone->elems,
                               zone->locked);
                        for (j = 0; j < zone->ndevs; ++j) {
                                printk(KERN_CONT " %lld",
                                       zone->devs[j]->devid);
                        }
                        if (device->reada_curr_zone == zone)
                                printk(KERN_CONT " curr off %llu",
                                       device->reada_next - zone->start);
                        printk(KERN_CONT "\n");
                        index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
                }
                cnt = 0;
                index = 0;
                while (all) {
                        struct reada_extent *re = NULL;

                        ret = radix_tree_gang_lookup(&device->reada_extents,
                                                     (void **)&re, index, 1);
                        if (ret == 0)
                                break;
                        printk(KERN_DEBUG "  re: logical %llu size %u empty %d scheduled %d",
                               re->logical, fs_info->tree_root->nodesize,
                               list_empty(&re->extctl), re->scheduled);

                        for (i = 0; i < re->nzones; ++i) {
                                printk(KERN_CONT " zone %llu-%llu devs",
                                       re->zones[i]->start,
                                       re->zones[i]->end);
                                for (j = 0; j < re->zones[i]->ndevs; ++j) {
                                        printk(KERN_CONT " %lld",
                                               re->zones[i]->devs[j]->devid);
                                }
                        }
                        printk(KERN_CONT "\n");
                        index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
                        if (++cnt > 15)
                                break;
                }
        }

        index = 0;
        cnt = 0;
        while (all) {
                struct reada_extent *re = NULL;

                ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
                                             index, 1);
                if (ret == 0)
                        break;
                if (!re->scheduled) {
                        index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
                        continue;
                }
                printk(KERN_DEBUG "re: logical %llu size %u list empty %d scheduled %d",
                       re->logical, fs_info->tree_root->nodesize,
                       list_empty(&re->extctl), re->scheduled);
                for (i = 0; i < re->nzones; ++i) {
                        printk(KERN_CONT " zone %llu-%llu devs",
                               re->zones[i]->start,
                               re->zones[i]->end);
                        for (j = 0; j < re->zones[i]->ndevs; ++j) {
                                printk(KERN_CONT " %lld",
                                       re->zones[i]->devs[j]->devid);
                        }
                }
                printk(KERN_CONT "\n");
                index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
        }
        spin_unlock(&fs_info->reada_lock);
}
#endif

/*
 * interface
 */
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
                        struct btrfs_key *key_start, struct btrfs_key *key_end)
{
        struct reada_control *rc;
        u64 start;
        u64 generation;
        int ret;
        struct extent_buffer *node;
        static struct btrfs_key max_key = {
                .objectid = (u64)-1,
                .type = (u8)-1,
                .offset = (u64)-1
        };

        rc = kzalloc(sizeof(*rc), GFP_NOFS);
        if (!rc)
                return ERR_PTR(-ENOMEM);

        rc->root = root;
        rc->key_start = *key_start;
        rc->key_end = *key_end;
        atomic_set(&rc->elems, 0);
        init_waitqueue_head(&rc->wait);
        kref_init(&rc->refcnt);
        kref_get(&rc->refcnt); /* one ref for having elements */

        node = btrfs_root_node(root);
        start = node->start;
        generation = btrfs_header_generation(node);
        free_extent_buffer(node);

        ret = reada_add_block(rc, start, &max_key, generation);
        if (ret) {
                kfree(rc);
                return ERR_PTR(ret);
        }

        reada_start_machine(root->fs_info);

        return rc;
}

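/*
 * Wait for all pending extents of @rc to be read. If no readahead work is
 * currently queued (reada_works_cnt == 0), kick the state machine again so
 * that extents left undone don't stall the wait indefinitely; this is the
 * behavior referred to by the commit title above.
 */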
#ifdef DEBUG
int btrfs_reada_wait(void *handle)
{
        struct reada_control *rc = handle;
        struct btrfs_fs_info *fs_info = rc->root->fs_info;

        while (atomic_read(&rc->elems)) {
                if (!atomic_read(&fs_info->reada_works_cnt))
                        reada_start_machine(fs_info);
                wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
                                   5 * HZ);
                dump_devs(rc->root->fs_info,
                          atomic_read(&rc->elems) < 10 ? 1 : 0);
        }

        dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);

        kref_put(&rc->refcnt, reada_control_release);

        return 0;
}
#else
int btrfs_reada_wait(void *handle)
{
        struct reada_control *rc = handle;
        struct btrfs_fs_info *fs_info = rc->root->fs_info;

        while (atomic_read(&rc->elems)) {
                if (!atomic_read(&fs_info->reada_works_cnt))
                        reada_start_machine(fs_info);
                wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
                                   (HZ + 9) / 10);
        }

        kref_put(&rc->refcnt, reada_control_release);

        return 0;
}
#endif

void btrfs_reada_detach(void *handle)
{
        struct reada_control *rc = handle;

        kref_put(&rc->refcnt, reada_control_release);
}