NFS: Another cleanup of the read/write request coalescing code
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/smp_lock.h>

#include <asm/system.h>

#include "internal.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static int nfs_pagein_multi(struct inode *, struct list_head *, size_t, int);
static int nfs_pagein_one(struct inode *, struct list_head *, size_t, int);
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;

static struct kmem_cache *nfs_rdata_cachep;
static mempool_t *nfs_rdata_mempool;

#define MIN_POOL_READ	(32)

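/*
 * Allocate a read request structure large enough to cover @len bytes.
 * The structure itself comes from a mempool so that reads can make
 * progress under memory pressure; the page vector is embedded for
 * small requests and allocated separately only when the request spans
 * more pages than fit in page_array.
 */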
struct nfs_read_data *nfs_readdata_alloc(size_t len)
{
	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_rdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

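/*
 * Freeing is deferred through call_rcu_bh() so that code still holding
 * an RCU-protected reference to the embedded rpc_task does not see it
 * disappear; any separately allocated page vector is released before
 * the entry is returned to the mempool.
 */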
static void nfs_readdata_rcu_free(struct rcu_head *head)
{
	struct nfs_read_data *p = container_of(head, struct nfs_read_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_rdata_mempool);
}

static void nfs_readdata_free(struct nfs_read_data *rdata)
{
	call_rcu_bh(&rdata->task.u.tk_rcu, nfs_readdata_rcu_free);
}

void nfs_readdata_release(void *data)
{
	nfs_readdata_free(data);
}

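/*
 * A page entirely beyond the cached end-of-file needs no RPC at all:
 * zero it, mark it up to date and unlock it.
 */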
static
int nfs_return_empty_page(struct page *page)
{
	memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

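/*
 * If the server returned fewer bytes than requested and set the eof
 * flag, the tail of the page vector was never filled in by the reply.
 * Zero that region so stale data cannot leak to user space.
 */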
static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
{
	unsigned int remainder = data->args.count - data->res.count;
	unsigned int base = data->args.pgbase + data->res.count;
	unsigned int pglen;
	struct page **pages;

	if (data->res.eof == 0 || remainder == 0)
		return;
	/*
	 * Note: "remainder" can never be negative, since we check for
	 * this in the XDR code.
	 */
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	pglen = PAGE_CACHE_SIZE - base;
	for (;;) {
		if (remainder <= pglen) {
			memclear_highpage_flush(*pages, base, remainder);
			break;
		}
		memclear_highpage_flush(*pages, base, pglen);
		pages++;
		remainder -= pglen;
		pglen = PAGE_CACHE_SIZE;
		base = 0;
	}
}

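/*
 * Build a single nfs_page request for @page and feed it to the
 * coalescing code; the request is split into rsize-sized chunks when
 * the server's rsize is smaller than a page.
 */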
static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page)
{
	LIST_HEAD(one_request);
	struct nfs_page *new;
	unsigned int len;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);

	nfs_list_add_request(new, &one_request);
	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
		nfs_pagein_multi(inode, &one_request, len, 0);
	else
		nfs_pagein_one(inode, &one_request, len, 0);
	return 0;
}

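/*
 * Unlock the page and drop the final references once the read has
 * completed, successfully or not.
 */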
static void nfs_readpage_release(struct nfs_page *req)
{
	unlock_page(req->wb_page);

	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
	nfs_clear_request(req);
	nfs_release_request(req);
}

/*
 * Set up the NFS read request struct
 */
static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset)
{
	struct inode *inode;
	int flags;

	data->req = req;
	data->inode = inode = req->wb_context->dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages = data->pagevec;
	data->args.count = count;
	data->args.context = req->wb_context;

	data->res.fattr = &data->fattr;
	data->res.count = count;
	data->res.eof = 0;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0);
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->read_setup(data);

	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %5u initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
			data->task.tk_pid,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			count,
			(unsigned long long)data->args.offset);
}

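/*
 * Error path for a list of requests that could not be sent: mark every
 * page as failed and release the requests.
 */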
static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		SetPageError(req->wb_page);
		nfs_readpage_release(req);
	}
}

/*
 * Start an async read operation
 */
static void nfs_execute_read(struct nfs_read_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	rpc_execute(&data->task);
	rpc_clnt_sigunmask(clnt, &oldset);
}

/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
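/*
 * For example, with a 4096-byte page and rsize = 1024, four child
 * requests of 1024 bytes are issued at offsets 0, 1024, 2048 and 3072;
 * req->wb_complete counts the children still outstanding, and the page
 * is unlocked only when the last one completes.
 */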
static int nfs_pagein_multi(struct inode *inode, struct list_head *head, size_t count, int flags)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_read_data *data;
	size_t rsize = NFS_SERVER(inode)->rsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = count;
	do {
		size_t len = min(nbytes, rsize);

		data = nfs_readdata_alloc(len);
		if (!data)
			goto out_bad;
		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = count;
	do {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes < rsize)
			rsize = nbytes;
		nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
				rsize, offset);
		offset += rsize;
		nbytes -= rsize;
		nfs_execute_read(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del(&data->pages);
		nfs_readdata_free(data);
	}
	SetPageError(page);
	nfs_readpage_release(req);
	return -ENOMEM;
}

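/*
 * Coalesce a list of contiguous requests into a single RPC: every page
 * is gathered into one page vector and sent as one READ of @count
 * bytes.
 */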
static int nfs_pagein_one(struct inode *inode, struct list_head *head, size_t count, int flags)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_read_data *data;

	data = nfs_readdata_alloc(count);
	if (!data)
		goto out_bad;

	INIT_LIST_HEAD(&data->pages);
	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);

	nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);

	nfs_execute_read(data);
	return 0;
out_bad:
	nfs_async_read_error(head);
	return -ENOMEM;
}

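/*
 * Feed a list of read requests through a pageio descriptor, which
 * coalesces contiguous requests into RPCs of up to rsize bytes, and
 * return the number of pages submitted.  Anything left on the list
 * afterwards is failed via nfs_async_read_error().
 */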
static int
nfs_pagein_list(struct inode *inode, struct list_head *head, unsigned int rsize)
{
	struct nfs_pageio_descriptor desc;
	unsigned int pages = 0;
	int error = 0;

	if (rsize < PAGE_CACHE_SIZE)
		nfs_pageio_init(&desc, inode, nfs_pagein_multi, rsize, 0);
	else
		nfs_pageio_init(&desc, inode, nfs_pagein_one, rsize, 0);

	nfs_pageio_add_list(&desc, head);
	nfs_pageio_complete(&desc);
	pages += (desc.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	nfs_async_read_error(head);
	if (error >= 0)
		return pages;
	return error;
}

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
	int status;

	dprintk("NFS: %s: %5u, (status %d)\n", __FUNCTION__, task->tk_pid,
			task->tk_status);

	status = NFS_PROTO(data->inode)->read_done(task, data);
	if (status != 0)
		return status;

	nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_FLAGS(data->inode));
		nfs_mark_for_revalidate(data->inode);
	}
	spin_lock(&data->inode->i_lock);
	NFS_I(data->inode)->cache_validity |= NFS_INO_INVALID_ATIME;
	spin_unlock(&data->inode->i_lock);
	return 0;
}

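/*
 * A short read is not an error: if the server returned some data but
 * neither satisfied the full request nor reported eof, advance the
 * arguments past the bytes received and restart the RPC for the
 * remainder.
 */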
static int nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_readargs *argp = &data->args;
	struct nfs_readres *resp = &data->res;

	if (resp->eof || resp->count == argp->count)
		return 0;

	/* This is a short read! */
	nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0)
		return 0;

	/* Yes, so retry the read at the end of the data */
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call(task);
	return -EAGAIN;
}

/*
 * Handle a read reply that fills part of a page.
 */
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;

	if (nfs_readpage_result(task, data) != 0)
		return;

	if (likely(task->tk_status >= 0)) {
		nfs_readpage_truncate_uninitialised_page(data);
		if (nfs_readpage_retry(task, data) != 0)
			return;
	}
	if (unlikely(task->tk_status < 0))
		SetPageError(page);
	if (atomic_dec_and_test(&req->wb_complete)) {
		if (!PageError(page))
			SetPageUptodate(page);
		nfs_readpage_release(req);
	}
}

static const struct rpc_call_ops nfs_read_partial_ops = {
	.rpc_call_done = nfs_readpage_result_partial,
	.rpc_release = nfs_readdata_release,
};

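/*
 * Mark every page that was completely filled by the reply as up to
 * date; a trailing, partially filled page counts only when the server
 * reported eof or the read was satisfied in full.
 */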
static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
{
	unsigned int count = data->res.count;
	unsigned int base = data->args.pgbase;
	struct page **pages;

	if (data->res.eof)
		count = data->args.count;
	if (unlikely(count == 0))
		return;
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	count += base;
	for (; count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
		SetPageUptodate(*pages);
	if (count == 0)
		return;
	/* Was this a short read? */
	if (data->res.eof || data->res.count == data->args.count)
		SetPageUptodate(*pages);
}

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	/*
	 * Note: nfs_readpage_retry may change the values of
	 * data->args.  In the multi-page case, we therefore need
	 * to ensure that we call nfs_readpage_set_pages_uptodate()
	 * first.
	 */
	if (likely(task->tk_status >= 0)) {
		nfs_readpage_truncate_uninitialised_page(data);
		nfs_readpage_set_pages_uptodate(data);
		if (nfs_readpage_retry(task, data) != 0)
			return;
	}
	while (!list_empty(&data->pages)) {
		struct nfs_page *req = nfs_list_entry(data->pages.next);

		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
}

static const struct rpc_call_ops nfs_read_full_ops = {
	.rpc_call_done = nfs_readpage_result_full,
	.rpc_release = nfs_readdata_release,
};

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	int error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page->index);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file.
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_error;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_error;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_error;
	} else
		ctx = get_nfs_open_context((struct nfs_open_context *)
				file->private_data);

	error = nfs_readpage_async(ctx, inode, page);

	put_nfs_open_context(ctx);
	return error;

out_error:
	unlock_page(page);
	return error;
}

struct nfs_readdesc {
	struct list_head *head;
	struct nfs_open_context *ctx;
};

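/*
 * Callback for read_cache_pages(): flush any pending writes for the
 * page, then turn it into an nfs_page request on desc->head.  Pages
 * entirely past the cached end-of-file are zeroed and completed
 * without an RPC.
 */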
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *new;
	unsigned int len;

	nfs_wb_page(inode, page);
	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(desc->ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		SetPageError(page);
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
	nfs_list_add_request(new, desc->head);
	return 0;
}

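/*
 * The ->readpages() address_space operation: collect the readahead
 * pages into a request list, then hand the whole list to
 * nfs_pagein_list() for coalescing and transmission.
 */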
int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	LIST_HEAD(head);
	struct nfs_readdesc desc = {
		.head = &head,
	};
	struct inode *inode = mapping->host;
	struct nfs_server *server = NFS_SERVER(inode);
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context((struct nfs_open_context *)
				filp->private_data);
	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
	if (!list_empty(&head)) {
		int err = nfs_pagein_list(inode, &head, server->rsize);
		if (!ret)
			nfs_add_stats(inode, NFSIOS_READPAGES, err);
		ret = err;
	}
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}

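/*
 * Set up the slab cache and mempool that back nfs_read_data
 * allocations; MIN_POOL_READ entries are kept in reserve so reads can
 * proceed when the allocator is under pressure.
 */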
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_read_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
						     nfs_rdata_cachep);
	if (nfs_rdata_mempool == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	mempool_destroy(nfs_rdata_mempool);
	kmem_cache_destroy(nfs_rdata_cachep);
}