NFS: Read pages from FS-Cache into an NFS inode
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/smp_lock.h>

#include <asm/system.h>

#include "internal.h"
#include "iostat.h"
#include "fscache.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static int nfs_pagein_multi(struct inode *, struct list_head *, unsigned int, size_t, int);
static int nfs_pagein_one(struct inode *, struct list_head *, unsigned int, size_t, int);
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;

static struct kmem_cache *nfs_rdata_cachep;
static mempool_t *nfs_rdata_mempool;

#define MIN_POOL_READ	(32)

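/*
 * Allocate a read descriptor for a request covering @pagecount pages.
 * Descriptors come from a mempool so reads can make progress under
 * memory pressure, and GFP_NOFS keeps the allocation from recursing
 * back into the filesystem.  Small requests use the embedded
 * page_array; larger ones get a separately allocated page vector,
 * and NULL is returned if that vector cannot be allocated.
 */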
struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
	struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_rdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

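/*
 * Undo nfs_readdata_alloc(): free a separately allocated page vector,
 * if any, and return the descriptor to the mempool.
 */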
static void nfs_readdata_free(struct nfs_read_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_rdata_mempool);
}

void nfs_readdata_release(void *data)
{
	struct nfs_read_data *rdata = data;

	put_nfs_open_context(rdata->args.context);
	nfs_readdata_free(rdata);
}

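/*
 * Complete a read of a page that lies entirely beyond the end of file:
 * zero-fill it, mark it up to date and unlock it without issuing any
 * RPC at all.
 */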
static
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

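/*
 * When a read hits the end of file, the server returns fewer bytes
 * than requested.  Zero the uninitialised tail of the request so that
 * stale page-cache contents can never leak to userspace.
 */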
static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
{
	unsigned int remainder = data->args.count - data->res.count;
	unsigned int base = data->args.pgbase + data->res.count;
	unsigned int pglen;
	struct page **pages;

	if (data->res.eof == 0 || remainder == 0)
		return;
	/*
	 * Note: "remainder" can never be negative, since we check for
	 * this in the XDR code.
	 */
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	pglen = PAGE_CACHE_SIZE - base;
	for (;;) {
		if (remainder <= pglen) {
			zero_user(*pages, base, remainder);
			break;
		}
		zero_user(*pages, base, pglen);
		pages++;
		remainder -= pglen;
		pglen = PAGE_CACHE_SIZE;
		base = 0;
	}
}

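/*
 * Read a single page asynchronously: build one nfs_page request for
 * it, zero any tail beyond the known end of file, and send it out,
 * split into several RPCs if the server's rsize is smaller than a
 * page.
 */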
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		       struct page *page)
{
	LIST_HEAD(one_request);
	struct nfs_page *new;
	unsigned int len;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);

	nfs_list_add_request(new, &one_request);
	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
		nfs_pagein_multi(inode, &one_request, 1, len, 0);
	else
		nfs_pagein_one(inode, &one_request, 1, len, 0);
	return 0;
}

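/*
 * A request has been fully processed: unlock its page and drop the
 * reference that kept the request alive.
 */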
static void nfs_readpage_release(struct nfs_page *req)
{
	unlock_page(req->wb_page);

	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
		req->wb_context->path.dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));
	nfs_clear_request(req);
	nfs_release_request(req);
}

/*
 * Set up the NFS read request struct
 */
static int nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
			     const struct rpc_call_ops *call_ops,
			     unsigned int count, unsigned int offset)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = req->wb_context->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = NFS_CLIENT(inode),
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | swap_flags,
	};

	data->req = req;
	data->inode = inode;
	data->cred = msg.rpc_cred;

	data->args.fh = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages = data->pagevec;
	data->args.count = count;
	data->args.context = get_nfs_open_context(req->wb_context);

	data->res.fattr = &data->fattr;
	data->res.count = count;
	data->res.eof = 0;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	NFS_PROTO(inode)->read_setup(data, &msg);

	dprintk("NFS: %5u initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

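/*
 * Fail every request still queued on @head: flag each page as being
 * in error and release the request, which also unlocks the page.
 */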
static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		SetPageError(req->wb_page);
		nfs_readpage_release(req);
	}
}

/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire. If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated. This is more
 * or less conventional NFS client behavior.
 */
static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_read_data *data;
	size_t rsize = NFS_SERVER(inode)->rsize, nbytes;
	unsigned int offset;
	int requests = 0;
	int ret = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = count;
	do {
		size_t len = min(nbytes, rsize);

		data = nfs_readdata_alloc(1);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = count;
	do {
		int ret2;

		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes < rsize)
			rsize = nbytes;
		ret2 = nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
					 rsize, offset);
		if (ret == 0)
			ret = ret2;
		offset += rsize;
		nbytes -= rsize;
	} while (nbytes != 0);

	return ret;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del(&data->pages);
		nfs_readdata_free(data);
	}
	SetPageError(page);
	nfs_readpage_release(req);
	return -ENOMEM;
}

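/*
 * The counterpart of nfs_pagein_multi(): the rsize is at least a page,
 * so all the requests on @head can be gathered into a single READ
 * call.  If no descriptor can be allocated, every queued request is
 * failed.
 */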
static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_read_data *data;
	int ret = -ENOMEM;

	data = nfs_readdata_alloc(npages);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);

	return nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);
out_bad:
	nfs_async_read_error(head);
	return ret;
}

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
	int status;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
		task->tk_status);

	status = NFS_PROTO(data->inode)->read_done(task, data);
	if (status != 0)
		return status;

	nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags);
		nfs_mark_for_revalidate(data->inode);
	}
	return 0;
}

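/*
 * Deal with a short read that did not reach the end of file: if the
 * server made any progress at all, advance the arguments past the
 * bytes already received and restart the call for the remainder.
 */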
static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_readargs *argp = &data->args;
	struct nfs_readres *resp = &data->res;

	if (resp->eof || resp->count == argp->count)
		return;

	/* This is a short read! */
	nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0)
		return;

	/* Yes, so retry the read at the end of the data */
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call(task);
}

/*
 * Handle a read reply that fills part of a page.
 */
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		return;

	nfs_readpage_truncate_uninitialised_page(data);
	nfs_readpage_retry(task, data);
}

static void nfs_readpage_release_partial(void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;
	int status = data->task.tk_status;

	if (status < 0)
		SetPageError(page);

	if (atomic_dec_and_test(&req->wb_complete)) {
		if (!PageError(page))
			SetPageUptodate(page);
		nfs_readpage_release(req);
	}
	nfs_readdata_release(calldata);
}

static const struct rpc_call_ops nfs_read_partial_ops = {
	.rpc_call_done = nfs_readpage_result_partial,
	.rpc_release = nfs_readpage_release_partial,
};

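/*
 * Mark as up to date every page that the reply filled completely.  A
 * trailing, partially filled page only qualifies when the read hit
 * the end of file or was satisfied in full, since its tail has then
 * been zeroed by nfs_readpage_truncate_uninitialised_page().
 */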
static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
{
	unsigned int count = data->res.count;
	unsigned int base = data->args.pgbase;
	struct page **pages;

	if (data->res.eof)
		count = data->args.count;
	if (unlikely(count == 0))
		return;
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	count += base;
	for (; count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
		SetPageUptodate(*pages);
	if (count == 0)
		return;
	/* Was this a short read? */
	if (data->res.eof || data->res.count == data->args.count)
		SetPageUptodate(*pages);
}

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		return;
	/*
	 * Note: nfs_readpage_retry may change the values of
	 * data->args. In the multi-page case, we therefore need
	 * to ensure that we call nfs_readpage_set_pages_uptodate()
	 * first.
	 */
	nfs_readpage_truncate_uninitialised_page(data);
	nfs_readpage_set_pages_uptodate(data);
	nfs_readpage_retry(task, data);
}

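/*
 * Completion for the single-RPC case: release every request attached
 * to the read data (unlocking its page) and free the descriptor.
 */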
static void nfs_readpage_release_full(void *calldata)
{
	struct nfs_read_data *data = calldata;

	while (!list_empty(&data->pages)) {
		struct nfs_page *req = nfs_list_entry(data->pages.next);

		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
	nfs_readdata_release(calldata);
}

static const struct rpc_call_ops nfs_read_full_ops = {
	.rpc_call_done = nfs_readpage_result_full,
	.rpc_release = nfs_readpage_release_full,
};

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	int error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page->index);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file.
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);

out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}

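/*
 * Context passed through read_cache_pages(): the open context to read
 * with, and the pageio descriptor that coalesces per-page requests
 * into suitably sized READ calls.
 */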
struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};

static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, inode, page, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		error = desc->pgio->pg_error;
		goto out_unlock;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
	SetPageError(page);
out_unlock:
	unlock_page(page);
	return error;
}

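/*
 * The address_space readpages() method for NFS.  Pages that the local
 * FS-Cache can satisfy are read from there first; whatever remains is
 * coalesced into as few READ RPCs as the server's rsize permits.
 */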
int nfs_readpages(struct file *filp, struct address_space *mapping,
		  struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	struct nfs_server *server = NFS_SERVER(inode);
	size_t rsize = server->rsize;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	if (rsize < PAGE_CACHE_SIZE)
		nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0);
	else
		nfs_pageio_init(&pgio, inode, nfs_pagein_one, rsize, 0);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

	nfs_pageio_complete(&pgio);
	npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}

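/*
 * Create the slab cache and the mempool that back nfs_read_data
 * allocation; called once at NFS client initialisation.
 */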
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_read_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
						     nfs_rdata_cachep);
	if (nfs_rdata_mempool == NULL) {
		/* don't leak the slab cache if the mempool can't be created */
		kmem_cache_destroy(nfs_rdata_cachep);
		return -ENOMEM;
	}

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	mempool_destroy(nfs_rdata_mempool);
	kmem_cache_destroy(nfs_rdata_cachep);
}