/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
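
/*
 * Call-site sketch (an assumption based on the VFS open path of kernels
 * of this vintage, e.g. __dentry_open() in fs/open.c): the newly
 * allocated struct file is already zeroed, then its readahead state is
 * seeded before the first read:
 *
 *	file_ra_state_init(&f->f_ra, inode->i_mapping);
 *
 * prev_pos = -1 ensures a first read at a nonzero offset is not
 * mistaken for a sequential cache miss by ondemand_readahead() below.
 */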

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0);
		page->mapping = NULL;
		unlock_page(page);
	}
	page_cache_release(page);
}
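
/*
 * page_has_private() tests PG_private and PG_private_2 together;
 * PG_fscache is an alias of PG_private_2, so pages tagged by a netfs
 * for local caching take the do_invalidatepage() path above as well.
 */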

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = list_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		page_cache_release(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_CACHE_SIZE);
	}
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);
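
/*
 * Usage sketch (an assumption modelled on the NFS client of this era,
 * fs/nfs/read.c): a filesystem's ->readpages() hands its page list to
 * read_cache_pages() together with a per-page filler:
 *
 *	static int readpage_async_filler(void *data, struct page *page)
 *	{
 *		struct nfs_readdesc *desc = data;
 *		... queue an asynchronous RPC read for this page ...
 *	}
 *
 *	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
 *
 * The filler sees pages that are already locked and in the page cache;
 * an error return makes read_cache_pages() discard the unprocessed tail.
 */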

static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	unsigned page_idx;
	int ret;

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
		}
		page_cache_release(page);
	}
	ret = 0;
out:
	return ret;
}
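
/*
 * In the ->readpage() fallback above, add_to_page_cache_lru() takes its
 * own reference on success, so the unconditional page_cache_release()
 * only drops the allocation reference; the page cache keeps the page
 * alive while ->readpage() I/O is in flight.
 */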

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates all
 * the pages first, then submits them all for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			unsigned long lookahead_size)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		rcu_read_unlock();
		if (page)
			continue;

		page = page_cache_alloc_cold(mapping);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		if (page_idx == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		ret++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}
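
/*
 * Marker arithmetic: with nr_to_read = 16 and lookahead_size = 8, the
 * page allocated at page_idx 8 (offset + 8, halfway through the window)
 * gets PG_readahead.  When the application later reaches that page,
 * page_cache_async_readahead() fires the next window while the last 8
 * pages of this one are still unconsumed - the pipelining described in
 * the on-demand design comment below.
 */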

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	nr_to_read = max_sane_readahead(nr_to_read);
	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk, 0);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}
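
/*
 * Known callers in this kernel generation (hedged: from a reading of
 * mm/filemap.c, mm/fadvise.c and mm/madvise.c): sys_readahead(),
 * POSIX_FADV_WILLNEED and MADV_WILLNEED all come through here, since
 * they want the pages read regardless of the on-demand heuristics.
 * With 4KB pages, each loop iteration issues at most 512 pages.
 */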

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE)
		+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}
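
/*
 * The limit is half of the local node's free plus inactive-file pages:
 * readahead can displace roughly that much page cache, so a single
 * request is never allowed to claim more than could be reclaimed.
 */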

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
unsigned long ra_submit(struct file_ra_state *ra,
		       struct address_space *mapping, struct file *filp)
{
	int actual;

	actual = __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);

	return actual;
}

/*
 * Set the initial window size: round the request up to the next power of
 * two, then scale it - x4 when it is tiny relative to max, x2 when it is
 * medium-sized, and clamp to max once it exceeds a quarter of max.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}
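
/*
 * Worked example with the common max of 32 pages (128KB of 4KB pages):
 * a 4-page request rounds to 4, falls in the middle band (4 <= 32/4)
 * and becomes an 8-page window; a 9-page request rounds up to 16,
 * which exceeds 32/4, so the window jumps straight to all 32 pages.
 */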

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
						unsigned long max)
{
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (cur < max / 16)
		newsize = 4 * cur;
	else
		newsize = 2 * cur;

	return min(newsize, max);
}
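
/*
 * Ramp-up example, again with max = 32: a 1-page window quadruples to 4
 * (1 < 32/16), then doubles through 8 and 16 to 32, where min() pins it.
 * Growth is fastest while the window is tiny and slows once the window
 * is a meaningful fraction of max.
 */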

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window.  Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * invalidate each other's readahead state.  So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as the
 * readahead indicator.  The flag won't be set on already cached pages, to
 * avoid the readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads.  Note that the readahead algorithm checks loosely
 * for sequential patterns.  Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows
 * down as it approaches max_readahead.
 */

/*
 * Count contiguously cached pages from @offset-1 to @offset-@max,
 * this count is a conservative estimation of
 * 	- length of the sequential read sequence, or
 * 	- thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   struct file_ra_state *ra,
				   pgoff_t offset, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max);
	rcu_read_unlock();

	return offset - 1 - head;
}
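
/*
 * radix_tree_prev_hole() walks backwards from offset-1 over at most max
 * slots and returns the index of the first empty one, so offset-1-head
 * is the length of the contiguous cached run ending at offset-1, capped
 * at max when no hole is found within range.
 */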

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t offset,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, ra, offset, max);

	/*
	 * no history pages:
	 * it could be a random read
	 */
	if (!size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (size >= offset)
		size *= 2;

	ra->start = offset;
	ra->size = get_init_ra_size(size + req_size, max);
	ra->async_size = ra->size;

	return 1;
}
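
/*
 * Example: a sequential stream whose file_ra_state was lost (the file
 * was closed and reopened, say) has cached pages 0-29 and now misses on
 * page 30.  count_history_pages() reports 30, the size >= offset test
 * doubles it to 60, and get_init_ra_size() clamps the new window to
 * max - the stream resumes with a full-sized window instead of
 * re-ramping from a cold start.
 */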

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
		   struct file_ra_state *ra, struct file *filp,
		   bool hit_readahead_marker, pgoff_t offset,
		   unsigned long req_size)
{
	unsigned long max = max_sane_readahead(ra->ra_pages);

	/*
	 * start of file
	 */
	if (!offset)
		goto initial_readahead;

	/*
	 * It's the expected callback offset, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if ((offset == (ra->start + ra->size - ra->async_size) ||
	     offset == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size.  Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		rcu_read_lock();
		start = radix_tree_next_hole(&mapping->page_tree, offset + 1, max);
		rcu_read_unlock();

		if (!start || start - offset > max)
			return 0;

		ra->start = start;
		ra->size = start - offset;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 */
	if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	if (try_context_readahead(mapping, ra, offset, req_size, max))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);

initial_readahead:
	ra->start = offset;
	ra->size = get_init_ra_size(req_size, max);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulting next readahead window into the current one.
	 */
	if (offset == ra->start && ra->size == ra->async_size) {
		ra->async_size = get_next_ra_size(ra, max);
		ra->size += ra->async_size;
	}

	return ra_submit(ra, mapping, filp);
}
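
/*
 * For reference, the heuristics above are tried in this order: start of
 * file, continuation of the current window, marker hit with lost state,
 * oversize read, sequential cache miss, page cache context, and finally
 * a plain read that leaves the readahead state untouched for anything
 * that still looks random.
 */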

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra, struct file *filp,
			       pgoff_t offset, unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
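
/*
 * Usage sketch (an assumption modelled on do_generic_file_read() in
 * mm/filemap.c of this era): the generic read path invokes this on a
 * page cache miss, asking for the remainder of the request up front:
 *
 *	page = find_get_page(mapping, index);
 *	if (!page) {
 *		page_cache_sync_readahead(mapping, ra, filp,
 *					  index, last_index - index);
 *		page = find_get_page(mapping, index);
 *	}
 */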

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
			   struct file_ra_state *ra, struct file *filp,
			   struct page *page, pgoff_t offset,
			   unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (bdi_read_congested(mapping->backing_dev_info))
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
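
/*
 * Matching usage sketch (same assumption, mm/filemap.c): once a cached
 * page is found, the marker left by __do_page_cache_readahead() is what
 * keeps the pipeline running:
 *
 *	if (PageReadahead(page))
 *		page_cache_async_readahead(mapping, ra, filp,
 *					   page, index, last_index - index);
 *
 * The PageWriteback() bail-out above exists because a page under
 * writeback may carry the shared bit as PG_reclaim rather than
 * PG_readahead, and must not be treated as a marker hit.
 */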