FS-Cache: Check that there are no read ops when cookie relinquished
fs/cachefiles/rdwr.c
1/* Storage object read/write
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/mount.h>
 13#include <linux/slab.h>
14#include <linux/file.h>
15#include "internal.h"
16
17/*
18 * detect wake up events generated by the unlocking of pages in which we're
19 * interested
20 * - we use this to detect read completion of backing pages
21 * - the caller holds the waitqueue lock
22 */
23static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
24 int sync, void *_key)
25{
26 struct cachefiles_one_read *monitor =
27 container_of(wait, struct cachefiles_one_read, monitor);
28 struct cachefiles_object *object;
29 struct wait_bit_key *key = _key;
30 struct page *page = wait->private;
31
32 ASSERT(key);
33
34 _enter("{%lu},%u,%d,{%p,%u}",
35 monitor->netfs_page->index, mode, sync,
36 key->flags, key->bit_nr);
37
38 if (key->flags != &page->flags ||
39 key->bit_nr != PG_locked)
40 return 0;
41
42 _debug("--- monitor %p %lx ---", page, page->flags);
43
44 if (!PageUptodate(page) && !PageError(page)) {
 45		/* unlocked, not uptodate and not erroneous? */
46 _debug("page probably truncated");
47 }
48
49 /* remove from the waitqueue */
50 list_del(&wait->task_list);
51
52 /* move onto the action list and queue for FS-Cache thread pool */
53 ASSERT(monitor->op);
54
55 object = container_of(monitor->op->op.object,
56 struct cachefiles_object, fscache);
57
58 spin_lock(&object->work_lock);
59 list_add_tail(&monitor->op_link, &monitor->op->to_do);
60 spin_unlock(&object->work_lock);
61
62 fscache_enqueue_retrieval(monitor->op);
63 return 0;
64}
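/*
 * Editorial note (not part of the original file): monitor->monitor is a
 * wait_queue_t whose callback is pointed at cachefiles_read_waiter() above,
 * so it fires when the backing page is unlocked at the end of I/O.  The
 * installation pattern used later in this file looks roughly like this:
 *
 *	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);
 *	monitor->monitor.private = backpage;
 *	add_page_wait_queue(backpage, &monitor->monitor);
 */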
65
66/*
67 * handle a probably truncated page
68 * - check to see if the page is still relevant and reissue the read if
69 * possible
70 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
71 * must wait again and 0 if successful
72 */
73static int cachefiles_read_reissue(struct cachefiles_object *object,
74 struct cachefiles_one_read *monitor)
75{
76 struct address_space *bmapping = object->backer->d_inode->i_mapping;
77 struct page *backpage = monitor->back_page, *backpage2;
78 int ret;
79
80 kenter("{ino=%lx},{%lx,%lx}",
81 object->backer->d_inode->i_ino,
82 backpage->index, backpage->flags);
83
84 /* skip if the page was truncated away completely */
85 if (backpage->mapping != bmapping) {
86 kleave(" = -ENODATA [mapping]");
87 return -ENODATA;
88 }
89
90 backpage2 = find_get_page(bmapping, backpage->index);
91 if (!backpage2) {
92 kleave(" = -ENODATA [gone]");
93 return -ENODATA;
94 }
95
96 if (backpage != backpage2) {
97 put_page(backpage2);
98 kleave(" = -ENODATA [different]");
99 return -ENODATA;
100 }
101
102 /* the page is still there and we already have a ref on it, so we don't
103 * need a second */
104 put_page(backpage2);
105
106 INIT_LIST_HEAD(&monitor->op_link);
107 add_page_wait_queue(backpage, &monitor->monitor);
108
109 if (trylock_page(backpage)) {
110 ret = -EIO;
111 if (PageError(backpage))
112 goto unlock_discard;
113 ret = 0;
114 if (PageUptodate(backpage))
115 goto unlock_discard;
116
117 kdebug("reissue read");
118 ret = bmapping->a_ops->readpage(NULL, backpage);
119 if (ret < 0)
120 goto unlock_discard;
121 }
122
123 /* but the page may have been read before the monitor was installed, so
124 * the monitor may miss the event - so we have to ensure that we do get
125 * one in such a case */
126 if (trylock_page(backpage)) {
127 _debug("jumpstart %p {%lx}", backpage, backpage->flags);
128 unlock_page(backpage);
129 }
130
131 /* it'll reappear on the todo list */
132 kleave(" = -EINPROGRESS");
133 return -EINPROGRESS;
134
135unlock_discard:
136 unlock_page(backpage);
137 spin_lock_irq(&object->work_lock);
138 list_del(&monitor->op_link);
139 spin_unlock_irq(&object->work_lock);
140 kleave(" = %d", ret);
141 return ret;
142}
143
144/*
145 * copy data from backing pages to netfs pages to complete a read operation
146 * - driven by FS-Cache's thread pool
147 */
148static void cachefiles_read_copier(struct fscache_operation *_op)
149{
150 struct cachefiles_one_read *monitor;
151 struct cachefiles_object *object;
152 struct fscache_retrieval *op;
153 struct pagevec pagevec;
154 int error, max;
155
156 op = container_of(_op, struct fscache_retrieval, op);
157 object = container_of(op->op.object,
158 struct cachefiles_object, fscache);
159
160 _enter("{ino=%lu}", object->backer->d_inode->i_ino);
161
162 pagevec_init(&pagevec, 0);
163
164 max = 8;
165 spin_lock_irq(&object->work_lock);
166
167 while (!list_empty(&op->to_do)) {
168 monitor = list_entry(op->to_do.next,
169 struct cachefiles_one_read, op_link);
170 list_del(&monitor->op_link);
171
172 spin_unlock_irq(&object->work_lock);
173
174 _debug("- copy {%lu}", monitor->back_page->index);
175
176	recheck:
177 if (PageUptodate(monitor->back_page)) {
178 copy_highpage(monitor->netfs_page, monitor->back_page);
179 fscache_mark_page_cached(monitor->op,
180 monitor->netfs_page);
181			error = 0;
182 } else if (!PageError(monitor->back_page)) {
183 /* the page has probably been truncated */
184 error = cachefiles_read_reissue(object, monitor);
185 if (error == -EINPROGRESS)
186 goto next;
187 goto recheck;
188 } else {
189 cachefiles_io_error_obj(
190 object,
191 "Readpage failed on backing file %lx",
192 (unsigned long) monitor->back_page->flags);
193 error = -EIO;
194 }
195
196 page_cache_release(monitor->back_page);
197
198 fscache_end_io(op, monitor->netfs_page, error);
199 page_cache_release(monitor->netfs_page);
200 fscache_put_retrieval(op);
201 kfree(monitor);
202
203	next:
204 /* let the thread pool have some air occasionally */
205 max--;
206 if (max < 0 || need_resched()) {
207 if (!list_empty(&op->to_do))
208 fscache_enqueue_retrieval(op);
209 _leave(" [maxed out]");
210 return;
211 }
212
213 spin_lock_irq(&object->work_lock);
214 }
215
216 spin_unlock_irq(&object->work_lock);
217 _leave("");
218}
219
220/*
221 * read the corresponding page to the given set from the backing file
222 * - an uncertain page is simply discarded, to be tried again another time
223 */
224static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
225 struct fscache_retrieval *op,
226 struct page *netpage,
227 struct pagevec *pagevec)
228{
229 struct cachefiles_one_read *monitor;
230 struct address_space *bmapping;
231 struct page *newpage, *backpage;
232 int ret;
233
234 _enter("");
235
236 pagevec_reinit(pagevec);
237
238 _debug("read back %p{%lu,%d}",
239 netpage, netpage->index, page_count(netpage));
240
241	monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
242 if (!monitor)
243 goto nomem;
244
245 monitor->netfs_page = netpage;
246 monitor->op = fscache_get_retrieval(op);
247
248 init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);
249
250 /* attempt to get hold of the backing page */
251 bmapping = object->backer->d_inode->i_mapping;
252 newpage = NULL;
253
254 for (;;) {
255 backpage = find_get_page(bmapping, netpage->index);
256 if (backpage)
257 goto backing_page_already_present;
258
259 if (!newpage) {
260 newpage = __page_cache_alloc(cachefiles_gfp |
261 __GFP_COLD);
262 if (!newpage)
263 goto nomem_monitor;
264 }
265
266 ret = add_to_page_cache(newpage, bmapping,
267					netpage->index, cachefiles_gfp);
268 if (ret == 0)
269 goto installed_new_backing_page;
270 if (ret != -EEXIST)
271 goto nomem_page;
272 }
273
274 /* we've installed a new backing page, so now we need to add it
275 * to the LRU list and start it reading */
276installed_new_backing_page:
277 _debug("- new %p", newpage);
278
279 backpage = newpage;
280 newpage = NULL;
281
282 page_cache_get(backpage);
283 pagevec_add(pagevec, backpage);
284 __pagevec_lru_add_file(pagevec);
285
286read_backing_page:
287 ret = bmapping->a_ops->readpage(NULL, backpage);
288 if (ret < 0)
289 goto read_error;
290
291 /* set the monitor to transfer the data across */
292monitor_backing_page:
293 _debug("- monitor add");
294
295 /* install the monitor */
296 page_cache_get(monitor->netfs_page);
297 page_cache_get(backpage);
298 monitor->back_page = backpage;
299 monitor->monitor.private = backpage;
300 add_page_wait_queue(backpage, &monitor->monitor);
301 monitor = NULL;
302
303 /* but the page may have been read before the monitor was installed, so
304 * the monitor may miss the event - so we have to ensure that we do get
305 * one in such a case */
306 if (trylock_page(backpage)) {
307 _debug("jumpstart %p {%lx}", backpage, backpage->flags);
308 unlock_page(backpage);
309 }
310 goto success;
311
312 /* if the backing page is already present, it can be in one of
313 * three states: read in progress, read failed or read okay */
314backing_page_already_present:
315 _debug("- present");
316
317 if (newpage) {
318 page_cache_release(newpage);
319 newpage = NULL;
320 }
321
322 if (PageError(backpage))
323 goto io_error;
324
325 if (PageUptodate(backpage))
326 goto backing_page_already_uptodate;
327
328 if (!trylock_page(backpage))
329 goto monitor_backing_page;
330 _debug("read %p {%lx}", backpage, backpage->flags);
331 goto read_backing_page;
332
333 /* the backing page is already up to date, attach the netfs
334 * page to the pagecache and LRU and copy the data across */
335backing_page_already_uptodate:
336 _debug("- uptodate");
337
338	fscache_mark_page_cached(op, netpage);
339
340 copy_highpage(netpage, backpage);
341 fscache_end_io(op, netpage, 0);
342
343success:
344 _debug("success");
345 ret = 0;
346
347out:
348 if (backpage)
349 page_cache_release(backpage);
350 if (monitor) {
351 fscache_put_retrieval(monitor->op);
352 kfree(monitor);
353 }
354 _leave(" = %d", ret);
355 return ret;
356
357read_error:
358 _debug("read error %d", ret);
359 if (ret == -ENOMEM)
360 goto out;
361io_error:
362 cachefiles_io_error_obj(object, "Page read error on backing file");
363 ret = -ENOBUFS;
364 goto out;
365
366nomem_page:
367 page_cache_release(newpage);
368nomem_monitor:
369 fscache_put_retrieval(monitor->op);
370 kfree(monitor);
371nomem:
372 _leave(" = -ENOMEM");
373 return -ENOMEM;
374}
375
376/*
377 * read a page from the cache or allocate a block in which to store it
378 * - cache withdrawal is prevented by the caller
379 * - returns -EINTR if interrupted
 380 * - returns -ENOMEM if we run out of memory
381 * - returns -ENOBUFS if no buffers can be made available
382 * - returns -ENOBUFS if page is beyond EOF
383 * - if the page is backed by a block in the cache:
384 * - a read will be started which will call the callback on completion
385 * - 0 will be returned
386 * - else if the page is unbacked:
387 * - the metadata will be retained
388 * - -ENODATA will be returned
389 */
390int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
391 struct page *page,
392 gfp_t gfp)
393{
394 struct cachefiles_object *object;
395 struct cachefiles_cache *cache;
396 struct pagevec pagevec;
397 struct inode *inode;
398 sector_t block0, block;
399 unsigned shift;
400 int ret;
401
402 object = container_of(op->op.object,
403 struct cachefiles_object, fscache);
404 cache = container_of(object->fscache.cache,
405 struct cachefiles_cache, cache);
406
407 _enter("{%p},{%lx},,,", object, page->index);
408
409 if (!object->backer)
410 return -ENOBUFS;
411
412 inode = object->backer->d_inode;
413 ASSERT(S_ISREG(inode->i_mode));
414 ASSERT(inode->i_mapping->a_ops->bmap);
415 ASSERT(inode->i_mapping->a_ops->readpages);
416
417 /* calculate the shift required to use bmap */
418 if (inode->i_sb->s_blocksize > PAGE_SIZE)
419 return -ENOBUFS;
420
421 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
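	/* Editorial worked example (not in the original): with 4KiB pages
	 * (PAGE_SHIFT == 12) and a 1KiB backing blocksize
	 * (s_blocksize_bits == 10), shift == 2, so page index 5 is probed
	 * as backing block 5 << 2 == 20 by the bmap() call below. */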
422
423	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
424	op->op.flags |= FSCACHE_OP_ASYNC;
425 op->op.processor = cachefiles_read_copier;
426
427 pagevec_init(&pagevec, 0);
428
429 /* we assume the absence or presence of the first block is a good
430 * enough indication for the page as a whole
431 * - TODO: don't use bmap() for this as it is _not_ actually good
432 * enough for this as it doesn't indicate errors, but it's all we've
433 * got for the moment
434 */
435 block0 = page->index;
436 block0 <<= shift;
437
438 block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
439 _debug("%llx -> %llx",
440 (unsigned long long) block0,
441 (unsigned long long) block);
442
443 if (block) {
444 /* submit the apparently valid page to the backing fs to be
445 * read from disk */
446 ret = cachefiles_read_backing_file_one(object, op, page,
447 &pagevec);
448 } else if (cachefiles_has_space(cache, 0, 1) == 0) {
449 /* there's space in the cache we can use */
450		fscache_mark_page_cached(op, page);
451 ret = -ENODATA;
452 } else {
453 ret = -ENOBUFS;
454 }
455
456 _leave(" = %d", ret);
457 return ret;
458}
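/*
 * Editorial sketch (not part of the original file): a caller acting on the
 * return codes documented above might dispatch roughly as follows; the
 * helper names used here are hypothetical.
 *
 *	ret = cachefiles_read_or_alloc_page(op, page, gfp);
 *	switch (ret) {
 *	case 0:		break;			// backing read submitted; fscache_end_io()
 *						// is called when the copy completes
 *	case -ENODATA:	fetch_from_server();	break;	// block reserved, no data yet
 *	case -ENOBUFS:	bypass_cache();		break;	// cache cannot back this page
 *	default:	handle_error(ret);	break;
 *	}
 */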
459
460/*
461 * read the corresponding pages to the given set from the backing file
462 * - any uncertain pages are simply discarded, to be tried again another time
463 */
464static int cachefiles_read_backing_file(struct cachefiles_object *object,
465 struct fscache_retrieval *op,
466					struct list_head *list)
467{
468 struct cachefiles_one_read *monitor = NULL;
469 struct address_space *bmapping = object->backer->d_inode->i_mapping;
470 struct pagevec lru_pvec;
471 struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
472 int ret = 0;
473
474 _enter("");
475
476 pagevec_init(&lru_pvec, 0);
477
478 list_for_each_entry_safe(netpage, _n, list, lru) {
479 list_del(&netpage->lru);
480
481 _debug("read back %p{%lu,%d}",
482 netpage, netpage->index, page_count(netpage));
483
484 if (!monitor) {
485			monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
486 if (!monitor)
487 goto nomem;
488
489 monitor->op = fscache_get_retrieval(op);
490 init_waitqueue_func_entry(&monitor->monitor,
491 cachefiles_read_waiter);
492 }
493
494 for (;;) {
495 backpage = find_get_page(bmapping, netpage->index);
496 if (backpage)
497 goto backing_page_already_present;
498
499 if (!newpage) {
500 newpage = __page_cache_alloc(cachefiles_gfp |
501 __GFP_COLD);
502 if (!newpage)
503 goto nomem;
504 }
505
506 ret = add_to_page_cache(newpage, bmapping,
507						netpage->index, cachefiles_gfp);
508 if (ret == 0)
509 goto installed_new_backing_page;
510 if (ret != -EEXIST)
511 goto nomem;
512 }
513
514 /* we've installed a new backing page, so now we need to add it
515 * to the LRU list and start it reading */
516 installed_new_backing_page:
517 _debug("- new %p", newpage);
518
519 backpage = newpage;
520 newpage = NULL;
521
522 page_cache_get(backpage);
523 if (!pagevec_add(&lru_pvec, backpage))
524 __pagevec_lru_add_file(&lru_pvec);
525
526 reread_backing_page:
527 ret = bmapping->a_ops->readpage(NULL, backpage);
528 if (ret < 0)
529 goto read_error;
530
531 /* add the netfs page to the pagecache and LRU, and set the
532 * monitor to transfer the data across */
533 monitor_backing_page:
534 _debug("- monitor add");
535
536 ret = add_to_page_cache(netpage, op->mapping, netpage->index,
537				cachefiles_gfp);
538 if (ret < 0) {
539 if (ret == -EEXIST) {
540 page_cache_release(netpage);
541 continue;
542 }
543 goto nomem;
544 }
545
546 page_cache_get(netpage);
547 if (!pagevec_add(&lru_pvec, netpage))
548 __pagevec_lru_add_file(&lru_pvec);
549
550 /* install a monitor */
551 page_cache_get(netpage);
552 monitor->netfs_page = netpage;
553
554 page_cache_get(backpage);
555 monitor->back_page = backpage;
556 monitor->monitor.private = backpage;
557 add_page_wait_queue(backpage, &monitor->monitor);
558 monitor = NULL;
559
560 /* but the page may have been read before the monitor was
561 * installed, so the monitor may miss the event - so we have to
562 * ensure that we do get one in such a case */
563 if (trylock_page(backpage)) {
564 _debug("2unlock %p {%lx}", backpage, backpage->flags);
565 unlock_page(backpage);
566 }
567
568 page_cache_release(backpage);
569 backpage = NULL;
570
571 page_cache_release(netpage);
572 netpage = NULL;
573 continue;
574
575 /* if the backing page is already present, it can be in one of
576 * three states: read in progress, read failed or read okay */
577 backing_page_already_present:
578 _debug("- present %p", backpage);
579
580 if (PageError(backpage))
581 goto io_error;
582
583 if (PageUptodate(backpage))
584 goto backing_page_already_uptodate;
585
586 _debug("- not ready %p{%lx}", backpage, backpage->flags);
587
588 if (!trylock_page(backpage))
589 goto monitor_backing_page;
590
591 if (PageError(backpage)) {
592 _debug("error %lx", backpage->flags);
593 unlock_page(backpage);
594 goto io_error;
595 }
596
597 if (PageUptodate(backpage))
598 goto backing_page_already_uptodate_unlock;
599
600 /* we've locked a page that's neither up to date nor erroneous,
601 * so we need to attempt to read it again */
602 goto reread_backing_page;
603
604 /* the backing page is already up to date, attach the netfs
605 * page to the pagecache and LRU and copy the data across */
606 backing_page_already_uptodate_unlock:
607 _debug("uptodate %lx", backpage->flags);
608 unlock_page(backpage);
609 backing_page_already_uptodate:
610 _debug("- uptodate");
611
612 ret = add_to_page_cache(netpage, op->mapping, netpage->index,
613				cachefiles_gfp);
614 if (ret < 0) {
615 if (ret == -EEXIST) {
616 page_cache_release(netpage);
617 continue;
618 }
619 goto nomem;
620 }
621
622 copy_highpage(netpage, backpage);
623
624 page_cache_release(backpage);
625 backpage = NULL;
626
627		fscache_mark_page_cached(op, netpage);
628
629 page_cache_get(netpage);
630 if (!pagevec_add(&lru_pvec, netpage))
631 __pagevec_lru_add_file(&lru_pvec);
632
633		/* the netpage is unlocked and marked up to date here */
634 fscache_end_io(op, netpage, 0);
635 page_cache_release(netpage);
636 netpage = NULL;
637 continue;
638 }
639
640 netpage = NULL;
641
642 _debug("out");
643
644out:
645 /* tidy up */
646 pagevec_lru_add_file(&lru_pvec);
647
648 if (newpage)
649 page_cache_release(newpage);
650 if (netpage)
651 page_cache_release(netpage);
652 if (backpage)
653 page_cache_release(backpage);
654 if (monitor) {
655 fscache_put_retrieval(op);
656 kfree(monitor);
657 }
658
659 list_for_each_entry_safe(netpage, _n, list, lru) {
660 list_del(&netpage->lru);
661 page_cache_release(netpage);
662 }
663
664 _leave(" = %d", ret);
665 return ret;
666
667nomem:
668 _debug("nomem");
669 ret = -ENOMEM;
670 goto out;
671
672read_error:
673 _debug("read error %d", ret);
674 if (ret == -ENOMEM)
675 goto out;
676io_error:
677 cachefiles_io_error_obj(object, "Page read error on backing file");
678 ret = -ENOBUFS;
679 goto out;
680}
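/*
 * Editorial summary (not in the original): for each netfs page on the list,
 * the loop above either (a) finds or creates a backing page, kicks off
 * ->readpage() on it and installs a monitor so that cachefiles_read_copier()
 * completes the copy asynchronously, or (b) if the backing page is already
 * up to date, copies it across immediately and calls fscache_end_io() here.
 * Netfs pages that collide with existing pagecache entries (-EEXIST from
 * add_to_page_cache()) are simply released and skipped.
 */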
681
682/*
683 * read a list of pages from the cache or allocate blocks in which to store
684 * them
685 */
686int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
687 struct list_head *pages,
688 unsigned *nr_pages,
689 gfp_t gfp)
690{
691 struct cachefiles_object *object;
692 struct cachefiles_cache *cache;
693 struct list_head backpages;
694 struct pagevec pagevec;
695 struct inode *inode;
696 struct page *page, *_n;
697 unsigned shift, nrbackpages;
698 int ret, ret2, space;
699
700 object = container_of(op->op.object,
701 struct cachefiles_object, fscache);
702 cache = container_of(object->fscache.cache,
703 struct cachefiles_cache, cache);
704
705 _enter("{OBJ%x,%d},,%d,,",
706 object->fscache.debug_id, atomic_read(&op->op.usage),
707 *nr_pages);
708
709 if (!object->backer)
710 return -ENOBUFS;
711
712 space = 1;
713 if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
714 space = 0;
715
716 inode = object->backer->d_inode;
717 ASSERT(S_ISREG(inode->i_mode));
718 ASSERT(inode->i_mapping->a_ops->bmap);
719 ASSERT(inode->i_mapping->a_ops->readpages);
720
721 /* calculate the shift required to use bmap */
722 if (inode->i_sb->s_blocksize > PAGE_SIZE)
723 return -ENOBUFS;
724
725 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
726
727 pagevec_init(&pagevec, 0);
728
729	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
730	op->op.flags |= FSCACHE_OP_ASYNC;
731 op->op.processor = cachefiles_read_copier;
732
733 INIT_LIST_HEAD(&backpages);
734 nrbackpages = 0;
735
736 ret = space ? -ENODATA : -ENOBUFS;
737 list_for_each_entry_safe(page, _n, pages, lru) {
738 sector_t block0, block;
739
740 /* we assume the absence or presence of the first block is a
741 * good enough indication for the page as a whole
742 * - TODO: don't use bmap() for this as it is _not_ actually
743 * good enough for this as it doesn't indicate errors, but
744 * it's all we've got for the moment
745 */
746 block0 = page->index;
747 block0 <<= shift;
748
749 block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
750 block0);
751 _debug("%llx -> %llx",
752 (unsigned long long) block0,
753 (unsigned long long) block);
754
755 if (block) {
756 /* we have data - add it to the list to give to the
757 * backing fs */
758 list_move(&page->lru, &backpages);
759 (*nr_pages)--;
760 nrbackpages++;
761 } else if (space && pagevec_add(&pagevec, page) == 0) {
762 fscache_mark_pages_cached(op, &pagevec);
763 ret = -ENODATA;
764 }
765 }
766
767 if (pagevec_count(&pagevec) > 0)
768 fscache_mark_pages_cached(op, &pagevec);
769
770 if (list_empty(pages))
771 ret = 0;
772
773 /* submit the apparently valid pages to the backing fs to be read from
774 * disk */
775 if (nrbackpages > 0) {
776		ret2 = cachefiles_read_backing_file(object, op, &backpages);
777 if (ret2 == -ENOMEM || ret2 == -EINTR)
778 ret = ret2;
779 }
780
781 _leave(" = %d [nr=%u%s]",
782 ret, *nr_pages, list_empty(pages) ? " empty" : "");
783 return ret;
784}
785
786/*
787 * allocate a block in the cache in which to store a page
788 * - cache withdrawal is prevented by the caller
789 * - returns -EINTR if interrupted
 790 * - returns -ENOMEM if we run out of memory
791 * - returns -ENOBUFS if no buffers can be made available
792 * - returns -ENOBUFS if page is beyond EOF
793 * - otherwise:
794 * - the metadata will be retained
795 * - 0 will be returned
796 */
797int cachefiles_allocate_page(struct fscache_retrieval *op,
798 struct page *page,
799 gfp_t gfp)
800{
801 struct cachefiles_object *object;
802 struct cachefiles_cache *cache;
803 int ret;
804
805 object = container_of(op->op.object,
806 struct cachefiles_object, fscache);
807 cache = container_of(object->fscache.cache,
808 struct cachefiles_cache, cache);
809
810 _enter("%p,{%lx},", object, page->index);
811
812 ret = cachefiles_has_space(cache, 0, 1);
813 if (ret == 0)
814 fscache_mark_page_cached(op, page);
815 else
816		ret = -ENOBUFS;
817
818 _leave(" = %d", ret);
819 return ret;
820}
821
822/*
823 * allocate blocks in the cache in which to store a set of pages
824 * - cache withdrawal is prevented by the caller
825 * - returns -EINTR if interrupted
 826 * - returns -ENOMEM if we run out of memory
827 * - returns -ENOBUFS if some buffers couldn't be made available
828 * - returns -ENOBUFS if some pages are beyond EOF
829 * - otherwise:
830 * - -ENODATA will be returned
831 * - metadata will be retained for any page marked
832 */
833int cachefiles_allocate_pages(struct fscache_retrieval *op,
834 struct list_head *pages,
835 unsigned *nr_pages,
836 gfp_t gfp)
837{
838 struct cachefiles_object *object;
839 struct cachefiles_cache *cache;
840 struct pagevec pagevec;
841 struct page *page;
842 int ret;
843
844 object = container_of(op->op.object,
845 struct cachefiles_object, fscache);
846 cache = container_of(object->fscache.cache,
847 struct cachefiles_cache, cache);
848
849 _enter("%p,,,%d,", object, *nr_pages);
850
851 ret = cachefiles_has_space(cache, 0, *nr_pages);
852 if (ret == 0) {
853 pagevec_init(&pagevec, 0);
854
855 list_for_each_entry(page, pages, lru) {
856 if (pagevec_add(&pagevec, page) == 0)
857 fscache_mark_pages_cached(op, &pagevec);
858 }
859
860 if (pagevec_count(&pagevec) > 0)
861 fscache_mark_pages_cached(op, &pagevec);
862 ret = -ENODATA;
863 } else {
864 ret = -ENOBUFS;
865 }
866
867 _leave(" = %d", ret);
868 return ret;
869}
870
871/*
872 * request a page be stored in the cache
873 * - cache withdrawal is prevented by the caller
874 * - this request may be ignored if there's no cache block available, in which
875 * case -ENOBUFS will be returned
876 * - if the op is in progress, 0 will be returned
877 */
878int cachefiles_write_page(struct fscache_storage *op, struct page *page)
879{
880 struct cachefiles_object *object;
881 struct cachefiles_cache *cache;
882 mm_segment_t old_fs;
883 struct file *file;
884	struct path path;
885 loff_t pos, eof;
886 size_t len;
887 void *data;
888 int ret;
889
890 ASSERT(op != NULL);
891 ASSERT(page != NULL);
892
893 object = container_of(op->op.object,
894 struct cachefiles_object, fscache);
895
896 _enter("%p,%p{%lx},,,", object, page, page->index);
897
898 if (!object->backer) {
899 _leave(" = -ENOBUFS");
900 return -ENOBUFS;
901 }
902
903 ASSERT(S_ISREG(object->backer->d_inode->i_mode));
904
905 cache = container_of(object->fscache.cache,
906 struct cachefiles_cache, cache);
907
908 /* write the page to the backing filesystem and let it store it in its
909 * own time */
910 path.mnt = cache->mnt;
911 path.dentry = object->backer;
912	file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
913 if (IS_ERR(file)) {
914 ret = PTR_ERR(file);
915 } else {
916 ret = -EIO;
917 if (file->f_op->write) {
918 pos = (loff_t) page->index << PAGE_SHIFT;
919
920 /* we mustn't write more data than we have, so we have
921 * to beware of a partial page at EOF */
922 eof = object->fscache.store_limit_l;
923 len = PAGE_SIZE;
924 if (eof & ~PAGE_MASK) {
925 ASSERTCMP(pos, <, eof);
926 if (eof - pos < PAGE_SIZE) {
927 _debug("cut short %llx to %llx",
928 pos, eof);
929 len = eof - pos;
930 ASSERTCMP(pos + len, ==, eof);
931 }
932 }
933
934 data = kmap(page);
935 old_fs = get_fs();
936 set_fs(KERNEL_DS);
937 ret = file->f_op->write(
938				file, (const void __user *) data, len, &pos);
939 set_fs(old_fs);
940 kunmap(page);
941			if (ret != len)
942 ret = -EIO;
943 }
944 fput(file);
945 }
946
947 if (ret < 0) {
948 if (ret == -EIO)
949 cachefiles_io_error_obj(
950 object, "Write page to backing file failed");
951 ret = -ENOBUFS;
952 }
953
954 _leave(" = %d", ret);
955 return ret;
956}
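/*
 * Editorial worked example (not in the original): with PAGE_SIZE == 4096, a
 * page index of 3 gives pos == 0x3000.  If the object's store_limit_l (the
 * backing EOF) is 0x3345, then eof & ~PAGE_MASK is non-zero and
 * eof - pos == 0x345 < PAGE_SIZE, so len is cut short to 0x345 bytes rather
 * than a whole page, matching the "cut short" debug message above.
 */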
957
958/*
959 * detach a backing block from a page
960 * - cache withdrawal is prevented by the caller
961 */
962void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
963{
964 struct cachefiles_object *object;
965 struct cachefiles_cache *cache;
966
967 object = container_of(_object, struct cachefiles_object, fscache);
968 cache = container_of(object->fscache.cache,
969 struct cachefiles_cache, cache);
970
971 _enter("%p,{%lu}", object, page->index);
972
973 spin_unlock(&object->fscache.cookie->lock);
974}