Commit | Line | Data |
---|---|---|
9ae326a6 DH |
1 | /* Storage object read/write |
2 | * | |
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | |
4 | * Written by David Howells (dhowells@redhat.com) | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU General Public Licence | |
8 | * as published by the Free Software Foundation; either version | |
9 | * 2 of the Licence, or (at your option) any later version. | |
10 | */ | |
11 | ||
12 | #include <linux/mount.h> | |
5a0e3ad6 | 13 | #include <linux/slab.h> |
9ae326a6 DH |
14 | #include <linux/file.h> |
15 | #include "internal.h" | |
16 | ||
17 | /* | |
18 | * detect wake up events generated by the unlocking of pages in which we're | |
19 | * interested | |
20 | * - we use this to detect read completion of backing pages | |
21 | * - the caller holds the waitqueue lock | |
22 | */ | |
23 | static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode, | |
24 | int sync, void *_key) | |
25 | { | |
26 | struct cachefiles_one_read *monitor = | |
27 | container_of(wait, struct cachefiles_one_read, monitor); | |
28 | struct cachefiles_object *object; | |
29 | struct wait_bit_key *key = _key; | |
30 | struct page *page = wait->private; | |
31 | ||
32 | ASSERT(key); | |
33 | ||
34 | _enter("{%lu},%u,%d,{%p,%u}", | |
35 | monitor->netfs_page->index, mode, sync, | |
36 | key->flags, key->bit_nr); | |
37 | ||
38 | if (key->flags != &page->flags || | |
39 | key->bit_nr != PG_locked) | |
40 | return 0; | |
41 | ||
42 | _debug("--- monitor %p %lx ---", page, page->flags); | |
43 | ||
5e929b33 DH |
44 | if (!PageUptodate(page) && !PageError(page)) { |
45 | /* unlocked, not uptodate and not erroneous? */ | |
46 | _debug("page probably truncated"); | |
47 | } | |
9ae326a6 DH |
48 | |
49 | /* remove from the waitqueue */ | |
50 | list_del(&wait->task_list); | |
51 | ||
52 | /* move onto the action list and queue for the FS-Cache thread pool */ | |
53 | ASSERT(monitor->op); | |
54 | ||
55 | object = container_of(monitor->op->op.object, | |
56 | struct cachefiles_object, fscache); | |
57 | ||
58 | spin_lock(&object->work_lock); | |
59 | list_add_tail(&monitor->op_link, &monitor->op->to_do); | |
60 | spin_unlock(&object->work_lock); | |
61 | ||
62 | fscache_enqueue_retrieval(monitor->op); | |
63 | return 0; | |
64 | } | |
65 | ||
5e929b33 DH |
66 | /* |
67 | * handle a probably truncated page | |
68 | * - check to see if the page is still relevant and reissue the read if | |
69 | * possible | |
70 | * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we | |
71 | * must wait again and 0 if successful | |
72 | */ | |
73 | static int cachefiles_read_reissue(struct cachefiles_object *object, | |
74 | struct cachefiles_one_read *monitor) | |
75 | { | |
76 | struct address_space *bmapping = object->backer->d_inode->i_mapping; | |
77 | struct page *backpage = monitor->back_page, *backpage2; | |
78 | int ret; | |
79 | ||
37491a13 | 80 | _enter("{ino=%lx},{%lx,%lx}", |
5e929b33 DH |
81 | object->backer->d_inode->i_ino, |
82 | backpage->index, backpage->flags); | |
83 | ||
84 | /* skip if the page was truncated away completely */ | |
85 | if (backpage->mapping != bmapping) { | |
37491a13 | 86 | _leave(" = -ENODATA [mapping]"); |
5e929b33 DH |
87 | return -ENODATA; |
88 | } | |
89 | ||
90 | backpage2 = find_get_page(bmapping, backpage->index); | |
91 | if (!backpage2) { | |
37491a13 | 92 | _leave(" = -ENODATA [gone]"); |
5e929b33 DH |
93 | return -ENODATA; |
94 | } | |
95 | ||
96 | if (backpage != backpage2) { | |
97 | put_page(backpage2); | |
37491a13 | 98 | _leave(" = -ENODATA [different]"); |
5e929b33 DH |
99 | return -ENODATA; |
100 | } | |
101 | ||
102 | /* the page is still there and we already have a ref on it, so we don't | |
103 | * need a second */ | |
104 | put_page(backpage2); | |
105 | ||
106 | INIT_LIST_HEAD(&monitor->op_link); | |
107 | add_page_wait_queue(backpage, &monitor->monitor); | |
108 | ||
109 | if (trylock_page(backpage)) { | |
110 | ret = -EIO; | |
111 | if (PageError(backpage)) | |
112 | goto unlock_discard; | |
113 | ret = 0; | |
114 | if (PageUptodate(backpage)) | |
115 | goto unlock_discard; | |
116 | ||
37491a13 | 117 | _debug("reissue read"); |
5e929b33 DH |
118 | ret = bmapping->a_ops->readpage(NULL, backpage); |
119 | if (ret < 0) | |
120 | goto unlock_discard; | |
121 | } | |
122 | ||
123 | /* but the page may have been read before the monitor was installed, so | |
124 | * the monitor may miss the event - so we have to ensure that we do get | |
125 | * one in such a case */ | |
126 | if (trylock_page(backpage)) { | |
127 | _debug("jumpstart %p {%lx}", backpage, backpage->flags); | |
128 | unlock_page(backpage); | |
129 | } | |
130 | ||
131 | /* it'll reappear on the todo list */ | |
37491a13 | 132 | _leave(" = -EINPROGRESS"); |
5e929b33 DH |
133 | return -EINPROGRESS; |
134 | ||
135 | unlock_discard: | |
136 | unlock_page(backpage); | |
137 | spin_lock_irq(&object->work_lock); | |
138 | list_del(&monitor->op_link); | |
139 | spin_unlock_irq(&object->work_lock); | |
37491a13 | 140 | _leave(" = %d", ret); |
5e929b33 DH |
141 | return ret; |
142 | } | |
143 | ||
9ae326a6 DH |
144 | /* |
145 | * copy data from backing pages to netfs pages to complete a read operation | |
146 | * - driven by FS-Cache's thread pool | |
147 | */ | |
148 | static void cachefiles_read_copier(struct fscache_operation *_op) | |
149 | { | |
150 | struct cachefiles_one_read *monitor; | |
151 | struct cachefiles_object *object; | |
152 | struct fscache_retrieval *op; | |
153 | struct pagevec pagevec; | |
154 | int error, max; | |
155 | ||
156 | op = container_of(_op, struct fscache_retrieval, op); | |
157 | object = container_of(op->op.object, | |
158 | struct cachefiles_object, fscache); | |
159 | ||
160 | _enter("{ino=%lu}", object->backer->d_inode->i_ino); | |
161 | ||
162 | pagevec_init(&pagevec, 0); | |
163 | ||
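| /* copy only a small batch (8) of completed reads per pass so one op doesn't hog the thread pool */ |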
164 | max = 8; | |
165 | spin_lock_irq(&object->work_lock); | |
166 | ||
167 | while (!list_empty(&op->to_do)) { | |
168 | monitor = list_entry(op->to_do.next, | |
169 | struct cachefiles_one_read, op_link); | |
170 | list_del(&monitor->op_link); | |
171 | ||
172 | spin_unlock_irq(&object->work_lock); | |
173 | ||
174 | _debug("- copy {%lu}", monitor->back_page->index); | |
175 | ||
5e929b33 | 176 | recheck: |
9ae326a6 DH |
177 | if (PageUptodate(monitor->back_page)) { |
178 | copy_highpage(monitor->netfs_page, monitor->back_page); | |
c4d6d8db DH |
179 | fscache_mark_page_cached(monitor->op, |
180 | monitor->netfs_page); | |
9ae326a6 | 181 | error = 0; |
5e929b33 DH |
182 | } else if (!PageError(monitor->back_page)) { |
183 | /* the page has probably been truncated */ | |
184 | error = cachefiles_read_reissue(object, monitor); | |
185 | if (error == -EINPROGRESS) | |
186 | goto next; | |
187 | goto recheck; | |
188 | } else { | |
9ae326a6 DH |
189 | cachefiles_io_error_obj( |
190 | object, | |
191 | "Readpage failed on backing file %lx", | |
192 | (unsigned long) monitor->back_page->flags); | |
5e929b33 DH |
193 | error = -EIO; |
194 | } | |
9ae326a6 DH |
195 | |
196 | page_cache_release(monitor->back_page); | |
197 | ||
198 | fscache_end_io(op, monitor->netfs_page, error); | |
199 | page_cache_release(monitor->netfs_page); | |
9f10523f | 200 | fscache_retrieval_complete(op, 1); |
9ae326a6 DH |
201 | fscache_put_retrieval(op); |
202 | kfree(monitor); | |
203 | ||
5e929b33 | 204 | next: |
9ae326a6 DH |
205 | /* let the thread pool have some air occasionally */ |
206 | max--; | |
207 | if (max < 0 || need_resched()) { | |
208 | if (!list_empty(&op->to_do)) | |
209 | fscache_enqueue_retrieval(op); | |
210 | _leave(" [maxed out]"); | |
211 | return; | |
212 | } | |
213 | ||
214 | spin_lock_irq(&object->work_lock); | |
215 | } | |
216 | ||
217 | spin_unlock_irq(&object->work_lock); | |
218 | _leave(""); | |
219 | } | |
220 | ||
221 | /* | |
222 | * read the page corresponding to the given netfs page from the backing file | |
223 | * - an uncertain page is simply discarded, to be tried again another time | |
224 | */ | |
225 | static int cachefiles_read_backing_file_one(struct cachefiles_object *object, | |
226 | struct fscache_retrieval *op, | |
227 | struct page *netpage, | |
228 | struct pagevec *pagevec) | |
229 | { | |
230 | struct cachefiles_one_read *monitor; | |
231 | struct address_space *bmapping; | |
232 | struct page *newpage, *backpage; | |
233 | int ret; | |
234 | ||
235 | _enter(""); | |
236 | ||
237 | pagevec_reinit(pagevec); | |
238 | ||
239 | _debug("read back %p{%lu,%d}", | |
240 | netpage, netpage->index, page_count(netpage)); | |
241 | ||
5f4f9f4a | 242 | monitor = kzalloc(sizeof(*monitor), cachefiles_gfp); |
9ae326a6 DH |
243 | if (!monitor) |
244 | goto nomem; | |
245 | ||
246 | monitor->netfs_page = netpage; | |
247 | monitor->op = fscache_get_retrieval(op); | |
248 | ||
249 | init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter); | |
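| /* cachefiles_read_waiter() fires when the backing page it is attached to is unlocked and queues this monitor for the copier */ |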
250 | ||
251 | /* attempt to get hold of the backing page */ | |
252 | bmapping = object->backer->d_inode->i_mapping; | |
253 | newpage = NULL; | |
254 | ||
255 | for (;;) { | |
256 | backpage = find_get_page(bmapping, netpage->index); | |
257 | if (backpage) | |
258 | goto backing_page_already_present; | |
259 | ||
260 | if (!newpage) { | |
5f4f9f4a DH |
261 | newpage = __page_cache_alloc(cachefiles_gfp |
262 | 				__GFP_COLD); | |
9ae326a6 DH |
263 | if (!newpage) |
264 | goto nomem_monitor; | |
265 | } | |
266 | ||
267 | ret = add_to_page_cache(newpage, bmapping, | |
5f4f9f4a | 268 | netpage->index, cachefiles_gfp); |
9ae326a6 DH |
269 | if (ret == 0) |
270 | goto installed_new_backing_page; | |
271 | if (ret != -EEXIST) | |
272 | goto nomem_page; | |
273 | } | |
274 | ||
275 | /* we've installed a new backing page, so now we need to add it | |
276 | * to the LRU list and start it reading */ | |
277 | installed_new_backing_page: | |
278 | _debug("- new %p", newpage); | |
279 | ||
280 | backpage = newpage; | |
281 | newpage = NULL; | |
282 | ||
283 | page_cache_get(backpage); | |
284 | pagevec_add(pagevec, backpage); | |
285 | __pagevec_lru_add_file(pagevec); | |
286 | ||
287 | read_backing_page: | |
288 | ret = bmapping->a_ops->readpage(NULL, backpage); | |
289 | if (ret < 0) | |
290 | goto read_error; | |
291 | ||
292 | /* set the monitor to transfer the data across */ | |
293 | monitor_backing_page: | |
294 | _debug("- monitor add"); | |
295 | ||
296 | /* install the monitor */ | |
297 | page_cache_get(monitor->netfs_page); | |
298 | page_cache_get(backpage); | |
299 | monitor->back_page = backpage; | |
300 | monitor->monitor.private = backpage; | |
301 | add_page_wait_queue(backpage, &monitor->monitor); | |
302 | monitor = NULL; | |
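| /* the monitor now belongs to the waitqueue/copier path and will be freed there, not by this function */ |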
303 | ||
304 | /* but the page may have been read before the monitor was installed, so | |
305 | * the monitor may miss the event - so we have to ensure that we do get | |
306 | * one in such a case */ | |
307 | if (trylock_page(backpage)) { | |
308 | _debug("jumpstart %p {%lx}", backpage, backpage->flags); | |
309 | unlock_page(backpage); | |
310 | } | |
311 | goto success; | |
312 | ||
313 | /* if the backing page is already present, it can be in one of | |
314 | * three states: read in progress, read failed or read okay */ | |
315 | backing_page_already_present: | |
316 | _debug("- present"); | |
317 | ||
318 | if (newpage) { | |
319 | page_cache_release(newpage); | |
320 | newpage = NULL; | |
321 | } | |
322 | ||
323 | if (PageError(backpage)) | |
324 | goto io_error; | |
325 | ||
326 | if (PageUptodate(backpage)) | |
327 | goto backing_page_already_uptodate; | |
328 | ||
329 | if (!trylock_page(backpage)) | |
330 | goto monitor_backing_page; | |
331 | _debug("read %p {%lx}", backpage, backpage->flags); | |
332 | goto read_backing_page; | |
333 | ||
334 | /* the backing page is already up to date, attach the netfs | |
335 | * page to the pagecache and LRU and copy the data across */ | |
336 | backing_page_already_uptodate: | |
337 | _debug("- uptodate"); | |
338 | ||
c4d6d8db | 339 | fscache_mark_page_cached(op, netpage); |
9ae326a6 DH |
340 | |
341 | copy_highpage(netpage, backpage); | |
342 | fscache_end_io(op, netpage, 0); | |
9f10523f | 343 | fscache_retrieval_complete(op, 1); |
9ae326a6 DH |
344 | |
345 | success: | |
346 | _debug("success"); | |
347 | ret = 0; | |
348 | ||
349 | out: | |
350 | if (backpage) | |
351 | page_cache_release(backpage); | |
352 | if (monitor) { | |
353 | fscache_put_retrieval(monitor->op); | |
354 | kfree(monitor); | |
355 | } | |
356 | _leave(" = %d", ret); | |
357 | return ret; | |
358 | ||
359 | read_error: | |
360 | _debug("read error %d", ret); | |
361 | if (ret == -ENOMEM) | |
362 | goto out; | |
363 | io_error: | |
364 | cachefiles_io_error_obj(object, "Page read error on backing file"); | |
9f10523f | 365 | fscache_retrieval_complete(op, 1); |
9ae326a6 DH |
366 | ret = -ENOBUFS; |
367 | goto out; | |
368 | ||
369 | nomem_page: | |
370 | page_cache_release(newpage); | |
371 | nomem_monitor: | |
372 | fscache_put_retrieval(monitor->op); | |
373 | kfree(monitor); | |
374 | nomem: | |
9f10523f | 375 | fscache_retrieval_complete(op, 1); |
9ae326a6 DH |
376 | _leave(" = -ENOMEM"); |
377 | return -ENOMEM; | |
378 | } | |
379 | ||
380 | /* | |
381 | * read a page from the cache or allocate a block in which to store it | |
382 | * - cache withdrawal is prevented by the caller | |
383 | * - returns -EINTR if interrupted | |
384 | * - returns -ENOMEM if we ran out of memory | |
385 | * - returns -ENOBUFS if no buffers can be made available | |
386 | * - returns -ENOBUFS if page is beyond EOF | |
387 | * - if the page is backed by a block in the cache: | |
388 | * - a read will be started which will call the callback on completion | |
389 | * - 0 will be returned | |
390 | * - else if the page is unbacked: | |
391 | * - the metadata will be retained | |
392 | * - -ENODATA will be returned | |
393 | */ | |
394 | int cachefiles_read_or_alloc_page(struct fscache_retrieval *op, | |
395 | struct page *page, | |
396 | gfp_t gfp) | |
397 | { | |
398 | struct cachefiles_object *object; | |
399 | struct cachefiles_cache *cache; | |
400 | struct pagevec pagevec; | |
401 | struct inode *inode; | |
402 | sector_t block0, block; | |
403 | unsigned shift; | |
404 | int ret; | |
405 | ||
406 | object = container_of(op->op.object, | |
407 | struct cachefiles_object, fscache); | |
408 | cache = container_of(object->fscache.cache, | |
409 | struct cachefiles_cache, cache); | |
410 | ||
411 | _enter("{%p},{%lx},,,", object, page->index); | |
412 | ||
413 | if (!object->backer) | |
9f10523f | 414 | goto enobufs; |
9ae326a6 DH |
415 | |
416 | inode = object->backer->d_inode; | |
417 | ASSERT(S_ISREG(inode->i_mode)); | |
418 | ASSERT(inode->i_mapping->a_ops->bmap); | |
419 | ASSERT(inode->i_mapping->a_ops->readpages); | |
420 | ||
421 | /* calculate the shift required to use bmap */ | |
422 | if (inode->i_sb->s_blocksize > PAGE_SIZE) | |
9f10523f | 423 | goto enobufs; |
9ae326a6 DH |
424 | |
425 | shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits; | |
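| /* shift converts a page index into the index of the first backing-fs block covered by that page */ |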
426 | ||
4fbf4291 | 427 | op->op.flags &= FSCACHE_OP_KEEP_FLAGS; |
8af7c124 | 428 | op->op.flags |= FSCACHE_OP_ASYNC; |
9ae326a6 DH |
429 | op->op.processor = cachefiles_read_copier; |
430 | ||
431 | pagevec_init(&pagevec, 0); | |
432 | ||
433 | /* we assume the absence or presence of the first block is a good | |
434 | * enough indication for the page as a whole | |
435 | * - TODO: don't use bmap() for this as it is _not_ actually good | |
436 | * enough for this as it doesn't indicate errors, but it's all we've | |
437 | * got for the moment | |
438 | */ | |
439 | block0 = page->index; | |
440 | block0 <<= shift; | |
441 | ||
442 | block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0); | |
443 | _debug("%llx -> %llx", | |
444 | (unsigned long long) block0, | |
445 | (unsigned long long) block); | |
446 | ||
447 | if (block) { | |
448 | /* submit the apparently valid page to the backing fs to be | |
449 | * read from disk */ | |
450 | ret = cachefiles_read_backing_file_one(object, op, page, | |
451 | &pagevec); | |
452 | } else if (cachefiles_has_space(cache, 0, 1) == 0) { | |
453 | /* there's space in the cache we can use */ | |
c4d6d8db | 454 | fscache_mark_page_cached(op, page); |
9f10523f | 455 | fscache_retrieval_complete(op, 1); |
9ae326a6 DH |
456 | ret = -ENODATA; |
457 | } else { | |
9f10523f | 458 | goto enobufs; |
9ae326a6 DH |
459 | } |
460 | ||
461 | _leave(" = %d", ret); | |
462 | return ret; | |
9f10523f DH |
463 | |
464 | enobufs: | |
465 | fscache_retrieval_complete(op, 1); | |
466 | _leave(" = -ENOBUFS"); | |
467 | return -ENOBUFS; | |
9ae326a6 DH |
468 | } |
469 | ||
470 | /* | |
471 | * read the pages corresponding to the given set from the backing file | |
472 | * - any uncertain pages are simply discarded, to be tried again another time | |
473 | */ | |
474 | static int cachefiles_read_backing_file(struct cachefiles_object *object, | |
475 | struct fscache_retrieval *op, | |
c4d6d8db | 476 | struct list_head *list) |
9ae326a6 DH |
477 | { |
478 | struct cachefiles_one_read *monitor = NULL; | |
479 | struct address_space *bmapping = object->backer->d_inode->i_mapping; | |
480 | struct pagevec lru_pvec; | |
481 | struct page *newpage = NULL, *netpage, *_n, *backpage = NULL; | |
482 | int ret = 0; | |
483 | ||
484 | _enter(""); | |
485 | ||
486 | pagevec_init(&lru_pvec, 0); | |
487 | ||
488 | list_for_each_entry_safe(netpage, _n, list, lru) { | |
489 | list_del(&netpage->lru); | |
490 | ||
491 | _debug("read back %p{%lu,%d}", | |
492 | netpage, netpage->index, page_count(netpage)); | |
493 | ||
494 | if (!monitor) { | |
5f4f9f4a | 495 | monitor = kzalloc(sizeof(*monitor), cachefiles_gfp); |
9ae326a6 DH |
496 | if (!monitor) |
497 | goto nomem; | |
498 | ||
499 | monitor->op = fscache_get_retrieval(op); | |
500 | init_waitqueue_func_entry(&monitor->monitor, | |
501 | cachefiles_read_waiter); | |
502 | } | |
503 | ||
504 | for (;;) { | |
505 | backpage = find_get_page(bmapping, netpage->index); | |
506 | if (backpage) | |
507 | goto backing_page_already_present; | |
508 | ||
509 | if (!newpage) { | |
5f4f9f4a DH |
510 | newpage = __page_cache_alloc(cachefiles_gfp |
511 | 				__GFP_COLD); | |
9ae326a6 DH |
512 | if (!newpage) |
513 | goto nomem; | |
514 | } | |
515 | ||
516 | ret = add_to_page_cache(newpage, bmapping, | |
5f4f9f4a | 517 | netpage->index, cachefiles_gfp); |
9ae326a6 DH |
518 | if (ret == 0) |
519 | goto installed_new_backing_page; | |
520 | if (ret != -EEXIST) | |
521 | goto nomem; | |
522 | } | |
523 | ||
524 | /* we've installed a new backing page, so now we need to add it | |
525 | * to the LRU list and start it reading */ | |
526 | installed_new_backing_page: | |
527 | _debug("- new %p", newpage); | |
528 | ||
529 | backpage = newpage; | |
530 | newpage = NULL; | |
531 | ||
532 | page_cache_get(backpage); | |
533 | if (!pagevec_add(&lru_pvec, backpage)) | |
534 | __pagevec_lru_add_file(&lru_pvec); | |
535 | ||
536 | reread_backing_page: | |
537 | ret = bmapping->a_ops->readpage(NULL, backpage); | |
538 | if (ret < 0) | |
539 | goto read_error; | |
540 | ||
541 | /* add the netfs page to the pagecache and LRU, and set the | |
542 | * monitor to transfer the data across */ | |
543 | monitor_backing_page: | |
544 | _debug("- monitor add"); | |
545 | ||
546 | ret = add_to_page_cache(netpage, op->mapping, netpage->index, | |
5f4f9f4a | 547 | cachefiles_gfp); |
9ae326a6 DH |
548 | if (ret < 0) { |
549 | if (ret == -EEXIST) { | |
550 | page_cache_release(netpage); | |
551 | continue; | |
552 | } | |
553 | goto nomem; | |
554 | } | |
555 | ||
556 | page_cache_get(netpage); | |
557 | if (!pagevec_add(&lru_pvec, netpage)) | |
558 | __pagevec_lru_add_file(&lru_pvec); | |
559 | ||
560 | /* install a monitor */ | |
561 | page_cache_get(netpage); | |
562 | monitor->netfs_page = netpage; | |
563 | ||
564 | page_cache_get(backpage); | |
565 | monitor->back_page = backpage; | |
566 | monitor->monitor.private = backpage; | |
567 | add_page_wait_queue(backpage, &monitor->monitor); | |
568 | monitor = NULL; | |
569 | ||
570 | /* but the page may have been read before the monitor was | |
571 | * installed, so the monitor may miss the event - so we have to | |
572 | * ensure that we do get one in such a case */ | |
573 | if (trylock_page(backpage)) { | |
574 | _debug("2unlock %p {%lx}", backpage, backpage->flags); | |
575 | unlock_page(backpage); | |
576 | } | |
577 | ||
578 | page_cache_release(backpage); | |
579 | backpage = NULL; | |
580 | ||
581 | page_cache_release(netpage); | |
582 | netpage = NULL; | |
583 | continue; | |
584 | ||
585 | /* if the backing page is already present, it can be in one of | |
586 | * three states: read in progress, read failed or read okay */ | |
587 | backing_page_already_present: | |
588 | _debug("- present %p", backpage); | |
589 | ||
590 | if (PageError(backpage)) | |
591 | goto io_error; | |
592 | ||
593 | if (PageUptodate(backpage)) | |
594 | goto backing_page_already_uptodate; | |
595 | ||
596 | _debug("- not ready %p{%lx}", backpage, backpage->flags); | |
597 | ||
598 | if (!trylock_page(backpage)) | |
599 | goto monitor_backing_page; | |
600 | ||
601 | if (PageError(backpage)) { | |
602 | _debug("error %lx", backpage->flags); | |
603 | unlock_page(backpage); | |
604 | goto io_error; | |
605 | } | |
606 | ||
607 | if (PageUptodate(backpage)) | |
608 | goto backing_page_already_uptodate_unlock; | |
609 | ||
610 | /* we've locked a page that's neither up to date nor erroneous, | |
611 | * so we need to attempt to read it again */ | |
612 | goto reread_backing_page; | |
613 | ||
614 | /* the backing page is already up to date, attach the netfs | |
615 | * page to the pagecache and LRU and copy the data across */ | |
616 | backing_page_already_uptodate_unlock: | |
617 | _debug("uptodate %lx", backpage->flags); | |
618 | unlock_page(backpage); | |
619 | backing_page_already_uptodate: | |
620 | _debug("- uptodate"); | |
621 | ||
622 | ret = add_to_page_cache(netpage, op->mapping, netpage->index, | |
5f4f9f4a | 623 | cachefiles_gfp); |
9ae326a6 DH |
624 | if (ret < 0) { |
625 | if (ret == -EEXIST) { | |
626 | page_cache_release(netpage); | |
627 | continue; | |
628 | } | |
629 | goto nomem; | |
630 | } | |
631 | ||
632 | copy_highpage(netpage, backpage); | |
633 | ||
634 | page_cache_release(backpage); | |
635 | backpage = NULL; | |
636 | ||
c4d6d8db | 637 | fscache_mark_page_cached(op, netpage); |
9ae326a6 DH |
638 | |
639 | page_cache_get(netpage); | |
640 | if (!pagevec_add(&lru_pvec, netpage)) | |
641 | __pagevec_lru_add_file(&lru_pvec); | |
642 | ||
c4d6d8db | 643 | /* the netpage is unlocked and marked up to date here */ |
9ae326a6 | 644 | fscache_end_io(op, netpage, 0); |
9f10523f | 645 | fscache_retrieval_complete(op, 1); |
9ae326a6 DH |
646 | page_cache_release(netpage); |
647 | netpage = NULL; | |
648 | continue; | |
649 | } | |
650 | ||
651 | netpage = NULL; | |
652 | ||
653 | _debug("out"); | |
654 | ||
655 | out: | |
656 | /* tidy up */ | |
657 | pagevec_lru_add_file(&lru_pvec); | |
658 | ||
659 | if (newpage) | |
660 | page_cache_release(newpage); | |
661 | if (netpage) | |
662 | page_cache_release(netpage); | |
663 | if (backpage) | |
664 | page_cache_release(backpage); | |
665 | if (monitor) { | |
666 | fscache_put_retrieval(op); | |
667 | kfree(monitor); | |
668 | } | |
669 | ||
670 | list_for_each_entry_safe(netpage, _n, list, lru) { | |
671 | list_del(&netpage->lru); | |
672 | page_cache_release(netpage); | |
9f10523f | 673 | fscache_retrieval_complete(op, 1); |
9ae326a6 DH |
674 | } |
675 | ||
676 | _leave(" = %d", ret); | |
677 | return ret; | |
678 | ||
679 | nomem: | |
680 | _debug("nomem"); | |
681 | ret = -ENOMEM; | |
682 | goto out; | |
683 | ||
684 | read_error: | |
685 | _debug("read error %d", ret); | |
686 | if (ret == -ENOMEM) | |
687 | goto out; | |
688 | io_error: | |
689 | cachefiles_io_error_obj(object, "Page read error on backing file"); | |
690 | ret = -ENOBUFS; | |
691 | goto out; | |
692 | } | |
693 | ||
694 | /* | |
695 | * read a list of pages from the cache or allocate blocks in which to store | |
696 | * them | |
697 | */ | |
698 | int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op, | |
699 | struct list_head *pages, | |
700 | unsigned *nr_pages, | |
701 | gfp_t gfp) | |
702 | { | |
703 | struct cachefiles_object *object; | |
704 | struct cachefiles_cache *cache; | |
705 | struct list_head backpages; | |
706 | struct pagevec pagevec; | |
707 | struct inode *inode; | |
708 | struct page *page, *_n; | |
709 | unsigned shift, nrbackpages; | |
710 | int ret, ret2, space; | |
711 | ||
712 | object = container_of(op->op.object, | |
713 | struct cachefiles_object, fscache); | |
714 | cache = container_of(object->fscache.cache, | |
715 | struct cachefiles_cache, cache); | |
716 | ||
717 | _enter("{OBJ%x,%d},,%d,,", | |
718 | object->fscache.debug_id, atomic_read(&op->op.usage), | |
719 | *nr_pages); | |
720 | ||
721 | if (!object->backer) | |
9f10523f | 722 | goto all_enobufs; |
9ae326a6 DH |
723 | |
724 | space = 1; | |
725 | if (cachefiles_has_space(cache, 0, *nr_pages) < 0) | |
726 | space = 0; | |
727 | ||
728 | inode = object->backer->d_inode; | |
729 | ASSERT(S_ISREG(inode->i_mode)); | |
730 | ASSERT(inode->i_mapping->a_ops->bmap); | |
731 | ASSERT(inode->i_mapping->a_ops->readpages); | |
732 | ||
733 | /* calculate the shift required to use bmap */ | |
734 | if (inode->i_sb->s_blocksize > PAGE_SIZE) | |
9f10523f | 735 | goto all_enobufs; |
9ae326a6 DH |
736 | |
737 | shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits; | |
738 | ||
739 | pagevec_init(&pagevec, 0); | |
740 | ||
4fbf4291 | 741 | op->op.flags &= FSCACHE_OP_KEEP_FLAGS; |
8af7c124 | 742 | op->op.flags |= FSCACHE_OP_ASYNC; |
9ae326a6 DH |
743 | op->op.processor = cachefiles_read_copier; |
744 | ||
745 | INIT_LIST_HEAD(&backpages); | |
746 | nrbackpages = 0; | |
747 | ||
748 | ret = space ? -ENODATA : -ENOBUFS; | |
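| /* default result: -ENODATA if unbacked pages can be reserved in the cache, -ENOBUFS otherwise */ |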
749 | list_for_each_entry_safe(page, _n, pages, lru) { | |
750 | sector_t block0, block; | |
751 | ||
752 | /* we assume the absence or presence of the first block is a | |
753 | * good enough indication for the page as a whole | |
754 | * - TODO: don't use bmap() for this as it is _not_ actually | |
755 | * good enough for this as it doesn't indicate errors, but | |
756 | * it's all we've got for the moment | |
757 | */ | |
758 | block0 = page->index; | |
759 | block0 <<= shift; | |
760 | ||
761 | block = inode->i_mapping->a_ops->bmap(inode->i_mapping, | |
762 | block0); | |
763 | _debug("%llx -> %llx", | |
764 | (unsigned long long) block0, | |
765 | (unsigned long long) block); | |
766 | ||
767 | if (block) { | |
768 | /* we have data - add it to the list to give to the | |
769 | * backing fs */ | |
770 | list_move(&page->lru, &backpages); | |
771 | (*nr_pages)--; | |
772 | nrbackpages++; | |
773 | } else if (space && pagevec_add(&pagevec, page) == 0) { | |
774 | fscache_mark_pages_cached(op, &pagevec); | |
9f10523f | 775 | fscache_retrieval_complete(op, 1); |
9ae326a6 | 776 | ret = -ENODATA; |
9f10523f DH |
777 | } else { |
778 | fscache_retrieval_complete(op, 1); | |
9ae326a6 DH |
779 | } |
780 | } | |
781 | ||
782 | if (pagevec_count(&pagevec) > 0) | |
783 | fscache_mark_pages_cached(op, &pagevec); | |
784 | ||
785 | if (list_empty(pages)) | |
786 | ret = 0; | |
787 | ||
788 | /* submit the apparently valid pages to the backing fs to be read from | |
789 | * disk */ | |
790 | if (nrbackpages > 0) { | |
c4d6d8db | 791 | ret2 = cachefiles_read_backing_file(object, op, &backpages); |
9ae326a6 DH |
792 | if (ret2 == -ENOMEM || ret2 == -EINTR) |
793 | ret = ret2; | |
794 | } | |
795 | ||
9ae326a6 DH |
796 | _leave(" = %d [nr=%u%s]", |
797 | ret, *nr_pages, list_empty(pages) ? " empty" : ""); | |
798 | return ret; | |
9f10523f DH |
799 | |
800 | all_enobufs: | |
801 | fscache_retrieval_complete(op, *nr_pages); | |
802 | return -ENOBUFS; | |
9ae326a6 DH |
803 | } |
804 | ||
805 | /* | |
806 | * allocate a block in the cache in which to store a page | |
807 | * - cache withdrawal is prevented by the caller | |
808 | * - returns -EINTR if interrupted | |
809 | * - returns -ENOMEM if we ran out of memory | |
810 | * - returns -ENOBUFS if no buffers can be made available | |
811 | * - returns -ENOBUFS if page is beyond EOF | |
812 | * - otherwise: | |
813 | * - the metadata will be retained | |
814 | * - 0 will be returned | |
815 | */ | |
816 | int cachefiles_allocate_page(struct fscache_retrieval *op, | |
817 | struct page *page, | |
818 | gfp_t gfp) | |
819 | { | |
820 | struct cachefiles_object *object; | |
821 | struct cachefiles_cache *cache; | |
9ae326a6 DH |
822 | int ret; |
823 | ||
824 | object = container_of(op->op.object, | |
825 | struct cachefiles_object, fscache); | |
826 | cache = container_of(object->fscache.cache, | |
827 | struct cachefiles_cache, cache); | |
828 | ||
829 | _enter("%p,{%lx},", object, page->index); | |
830 | ||
831 | ret = cachefiles_has_space(cache, 0, 1); | |
c4d6d8db DH |
832 | if (ret == 0) |
833 | fscache_mark_page_cached(op, page); | |
834 | else | |
9ae326a6 | 835 | ret = -ENOBUFS; |
9ae326a6 | 836 | |
9f10523f | 837 | fscache_retrieval_complete(op, 1); |
9ae326a6 DH |
838 | _leave(" = %d", ret); |
839 | return ret; | |
840 | } | |
841 | ||
842 | /* | |
843 | * allocate blocks in the cache in which to store a set of pages | |
844 | * - cache withdrawal is prevented by the caller | |
845 | * - returns -EINTR if interrupted | |
846 | * - returns -ENOMEM if we ran out of memory | |
847 | * - returns -ENOBUFS if some buffers couldn't be made available | |
848 | * - returns -ENOBUFS if some pages are beyond EOF | |
849 | * - otherwise: | |
850 | * - -ENODATA will be returned | |
851 | * - metadata will be retained for any page marked | |
852 | */ | |
853 | int cachefiles_allocate_pages(struct fscache_retrieval *op, | |
854 | struct list_head *pages, | |
855 | unsigned *nr_pages, | |
856 | gfp_t gfp) | |
857 | { | |
858 | struct cachefiles_object *object; | |
859 | struct cachefiles_cache *cache; | |
860 | struct pagevec pagevec; | |
861 | struct page *page; | |
862 | int ret; | |
863 | ||
864 | object = container_of(op->op.object, | |
865 | struct cachefiles_object, fscache); | |
866 | cache = container_of(object->fscache.cache, | |
867 | struct cachefiles_cache, cache); | |
868 | ||
869 | _enter("%p,,,%d,", object, *nr_pages); | |
870 | ||
871 | ret = cachefiles_has_space(cache, 0, *nr_pages); | |
872 | if (ret == 0) { | |
873 | pagevec_init(&pagevec, 0); | |
874 | ||
875 | list_for_each_entry(page, pages, lru) { | |
876 | if (pagevec_add(&pagevec, page) == 0) | |
877 | fscache_mark_pages_cached(op, &pagevec); | |
878 | } | |
879 | ||
880 | if (pagevec_count(&pagevec) > 0) | |
881 | fscache_mark_pages_cached(op, &pagevec); | |
882 | ret = -ENODATA; | |
883 | } else { | |
884 | ret = -ENOBUFS; | |
885 | } | |
886 | ||
9f10523f | 887 | fscache_retrieval_complete(op, *nr_pages); |
9ae326a6 DH |
888 | _leave(" = %d", ret); |
889 | return ret; | |
890 | } | |
891 | ||
892 | /* | |
893 | * request a page be stored in the cache | |
894 | * - cache withdrawal is prevented by the caller | |
895 | * - this request may be ignored if there's no cache block available, in which | |
896 | * case -ENOBUFS will be returned | |
897 | * - if the op is in progress, 0 will be returned | |
898 | */ | |
899 | int cachefiles_write_page(struct fscache_storage *op, struct page *page) | |
900 | { | |
901 | struct cachefiles_object *object; | |
902 | struct cachefiles_cache *cache; | |
903 | mm_segment_t old_fs; | |
904 | struct file *file; | |
765927b2 | 905 | struct path path; |
a17754fb DH |
906 | loff_t pos, eof; |
907 | size_t len; | |
9ae326a6 DH |
908 | void *data; |
909 | int ret; | |
910 | ||
911 | ASSERT(op != NULL); | |
912 | ASSERT(page != NULL); | |
913 | ||
914 | object = container_of(op->op.object, | |
915 | struct cachefiles_object, fscache); | |
916 | ||
917 | _enter("%p,%p{%lx},,,", object, page, page->index); | |
918 | ||
919 | if (!object->backer) { | |
920 | _leave(" = -ENOBUFS"); | |
921 | return -ENOBUFS; | |
922 | } | |
923 | ||
924 | ASSERT(S_ISREG(object->backer->d_inode->i_mode)); | |
925 | ||
926 | cache = container_of(object->fscache.cache, | |
927 | struct cachefiles_cache, cache); | |
928 | ||
929 | /* write the page to the backing filesystem and let it store it in its | |
930 | * own time */ | |
765927b2 AV |
931 | path.mnt = cache->mnt; |
932 | path.dentry = object->backer; | |
98c350cd | 933 | file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred); |
9ae326a6 DH |
934 | if (IS_ERR(file)) { |
935 | ret = PTR_ERR(file); | |
936 | } else { | |
937 | ret = -EIO; | |
938 | if (file->f_op->write) { | |
939 | pos = (loff_t) page->index << PAGE_SHIFT; | |
a17754fb DH |
940 | |
941 | /* we mustn't write more data than we have, so we have | |
942 | * to beware of a partial page at EOF */ | |
943 | eof = object->fscache.store_limit_l; | |
944 | len = PAGE_SIZE; | |
945 | if (eof & ~PAGE_MASK) { | |
946 | ASSERTCMP(pos, <, eof); | |
947 | if (eof - pos < PAGE_SIZE) { | |
948 | _debug("cut short %llx to %llx", | |
949 | pos, eof); | |
950 | len = eof - pos; | |
951 | ASSERTCMP(pos + len, ==, eof); | |
952 | } | |
953 | } | |
954 | ||
9ae326a6 DH |
955 | data = kmap(page); |
956 | old_fs = get_fs(); | |
957 | set_fs(KERNEL_DS); | |
958 | ret = file->f_op->write( | |
a17754fb | 959 | file, (const void __user *) data, len, &pos); |
9ae326a6 DH |
960 | set_fs(old_fs); |
961 | kunmap(page); | |
a17754fb | 962 | if (ret != len) |
9ae326a6 DH |
963 | ret = -EIO; |
964 | } | |
965 | fput(file); | |
966 | } | |
967 | ||
968 | if (ret < 0) { | |
969 | if (ret == -EIO) | |
970 | cachefiles_io_error_obj( | |
971 | object, "Write page to backing file failed"); | |
972 | ret = -ENOBUFS; | |
973 | } | |
974 | ||
975 | _leave(" = %d", ret); | |
976 | return ret; | |
977 | } | |
978 | ||
979 | /* | |
980 | * detach a backing block from a page | |
981 | * - cache withdrawal is prevented by the caller | |
982 | */ | |
983 | void cachefiles_uncache_page(struct fscache_object *_object, struct page *page) | |
984 | { | |
985 | struct cachefiles_object *object; | |
986 | struct cachefiles_cache *cache; | |
987 | ||
988 | object = container_of(_object, struct cachefiles_object, fscache); | |
989 | cache = container_of(object->fscache.cache, | |
990 | struct cachefiles_cache, cache); | |
991 | ||
992 | _enter("%p,{%lu}", object, page->index); | |
993 | ||
994 | spin_unlock(&object->fscache.cookie->lock); | |
995 | } |