NFS: Clean up nfs read and write error paths
fs/nfs/direct.c

/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data. Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols. Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache. A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache. The client does not
 * correct unaligned requests from applications. All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files. Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */

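/*
 * Illustrative userspace sketch (not part of the original source): this
 * is the kind of application code that reaches the direct I/O paths in
 * this file. The path, transfer size, and 4096-byte alignment are made
 * up; aligning the buffer is the conventional precaution, since this
 * client does not correct unaligned requests.
 *
 *	#define _GNU_SOURCE		// for O_DIRECT
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		void *buf;
 *		int fd = open("/mnt/nfs/data", O_RDWR | O_DIRECT);
 *		if (fd < 0 || posix_memalign(&buf, 4096, 4096))
 *			return 1;
 *		// Serviced directly by the NFS server, bypassing the page cache.
 *		if (pread(fd, buf, 4096, 0) < 0)
 *			return 1;
 *		close(fd);
 *		return 0;
 *	}
 */
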
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct nfs_lock_context	*l_ctx;		/* Lock context info */
	struct kiocb *		iocb;		/* controlling i/o request */
	struct inode *		inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */
	ssize_t			count,		/* bytes actually processed */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
	struct work_struct	work;
	int			flags;
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static void nfs_direct_write_schedule_work(struct work_struct *work);

static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}

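/*
 * Illustrative sketch (not from the original source): io_count follows
 * the usual "one reference per outstanding I/O, plus one for the
 * issuer" idiom, which is why completion can never fire while requests
 * are still being queued. A minimal userspace analogue, where
 * complete() stands in for a hypothetical completion callback:
 *
 *	#include <stdatomic.h>
 *
 *	static atomic_int io_count = ATOMIC_VAR_INIT(1);	// issuer's ref
 *
 *	static void start_io(void)
 *	{
 *		atomic_fetch_add(&io_count, 1);		// like get_dreq()
 *	}
 *
 *	static void end_io(void)
 *	{
 *		if (atomic_fetch_sub(&io_count, 1) == 1)	// like put_dreq()
 *			complete();		// last reference dropped
 *	}
 *
 * The issuer drops its own reference exactly once, after everything
 * has been dispatched.
 */
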
/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O. However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
			iocb->ki_filp->f_path.dentry->d_name.name,
			(long long) pos, nr_segs);

	return -EINVAL;
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		page_cache_release(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
			      struct nfs_direct_req *dreq)
{
	cinfo->lock = &dreq->lock;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	spin_lock_init(&dreq->lock);

	return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result)
		result = dreq->error;
	if (!result)
		result = dreq->count;

out:
	return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb. Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (!res)
			res = (long) dreq->count;
		aio_complete(dreq->iocb, res, 0);
	}
	complete_all(&dreq->completion);

	nfs_direct_req_release(dreq);
}

static void nfs_direct_readpage_release(struct nfs_page *req)
{
	dprintk("NFS: direct read done (%s/%lld %d@%lld)\n",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));
	nfs_release_request(req);
}

static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
		dreq->error = hdr->error;
	else
		dreq->count += hdr->good_bytes;
	spin_unlock(&dreq->lock);

	if (!test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
		while (!list_empty(&hdr->pages)) {
			struct nfs_page *req = nfs_list_entry(hdr->pages.next);
			struct page *page = req->wb_page;

			if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
				if (bytes > hdr->good_bytes)
					zero_user(page, 0, PAGE_SIZE);
				else if (hdr->good_bytes - bytes < PAGE_SIZE)
					zero_user_segment(page,
						hdr->good_bytes & ~PAGE_MASK,
						PAGE_SIZE);
			}
			bytes += req->wb_bytes;
			nfs_list_remove_request(req);
			if (!PageCompound(page))
				set_page_dirty(page);
			nfs_direct_readpage_release(req);
		}
	} else {
		while (!list_empty(&hdr->pages)) {
			struct nfs_page *req = nfs_list_entry(hdr->pages.next);

			if (bytes < hdr->good_bytes)
				if (!PageCompound(req->wb_page))
					set_page_dirty(req->wb_page);
			bytes += req->wb_bytes;
			nfs_list_remove_request(req);
			nfs_direct_readpage_release(req);
		}
	}
out_put:
	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	hdr->release(hdr);
}

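/*
 * Worked example of the EOF zeroing above (illustrative numbers, not
 * from the source): assume PAGE_SIZE is 4096, three pages were
 * scheduled, and the server returned good_bytes = 5000 with
 * NFS_IOHDR_EOF set. "bytes" tracks each request's offset within the
 * transfer as the loop walks the pages:
 *
 *	page 0: bytes = 0,    5000 - 0 >= 4096  -> left intact
 *	page 1: bytes = 4096, 5000 - 4096 = 904 -> zeroed from offset 904
 *	                                           (5000 & ~PAGE_MASK) to
 *	                                           the end of the page
 *	page 2: bytes = 8192, 8192 > 5000       -> zeroed entirely
 *
 * So user-visible memory beyond EOF always reads back as zeroes.
 */
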
static void nfs_read_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
	get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
	.error_cleanup = nfs_read_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation. If get_user_pages() fails, or the page vector can't be
 * allocated, bail and stop sending more reads. Read length accounting
 * is handled by nfs_direct_read_completion(). Otherwise, if no
 * requests have been sent, just return an error.
 */
static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
						const struct iovec *iov,
						loff_t pos)
{
	struct nfs_direct_req *dreq = desc->pg_dreq;
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	size_t rsize = NFS_SERVER(inode)->rsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;
	struct page **pagevec = NULL;
	unsigned int npages;

	do {
		size_t bytes;
		int i;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(max(rsize, PAGE_SIZE), count);

		result = -ENOMEM;
		npages = nfs_page_array_len(pgbase, bytes);
		if (!pagevec)
			pagevec = kmalloc(npages * sizeof(struct page *),
					  GFP_KERNEL);
		if (!pagevec)
			break;
		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					npages, 1, 0, pagevec, NULL);
		up_read(&current->mm->mmap_sem);
		if (result < 0)
			break;
		if ((unsigned)result < npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(pagevec, result);
				break;
			}
			bytes -= pgbase;
			npages = result;
		}

		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min(bytes, PAGE_SIZE - pgbase);
			/* XXX do we need to do the eof zeroing found in async_filler? */
			req = nfs_create_request(dreq->ctx, dreq->inode,
						 pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(desc, req)) {
				result = desc->pg_error;
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			started += req_len;
			user_addr += req_len;
			pos += req_len;
			count -= req_len;
		}
		/* The nfs_page structures now hold references to these pages */
		nfs_direct_release_pages(pagevec, npages);
	} while (count != 0 && result >= 0);

	kfree(pagevec);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}

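/*
 * Worked example of the chunking math above (illustrative values):
 * nfs_page_array_len(pgbase, bytes), defined in internal.h, rounds the
 * span up to whole pages, roughly
 * ((pgbase + bytes + PAGE_SIZE - 1) >> PAGE_SHIFT). With a 4 KiB page
 * size, rsize = 32768, and a user buffer at 0x10000234:
 *
 *	pgbase = 0x10000234 & ~PAGE_MASK   = 0x234 (564)
 *	bytes  = min(max(32768, 4096), count) = 32768
 *	npages = (564 + 32768 + 4095) >> 12   = 9
 *
 * so a misaligned 32 KiB chunk pins nine pages rather than eight, and
 * only the first request of the chunk carries a nonzero page offset.
 */
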
static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      const struct iovec *iov,
					      unsigned long nr_segs,
					      loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	unsigned long seg;

	nfs_pageio_init_read(&desc, dreq->inode,
			     &nfs_direct_read_completion_ops);
	get_dreq(dreq);
	desc.pg_dreq = dreq;

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_read_schedule_segment(&desc, vec, pos);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}

	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	return 0;
}

static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos)
{
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
	if (dreq->l_ctx == NULL)
		goto out_release;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
	if (!result)
		result = nfs_direct_wait(dreq);
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req, *tmp;
	LIST_HEAD(reqs);
	struct nfs_commit_info cinfo;
	LIST_HEAD(failed);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo);
	spin_lock(cinfo.lock);
	nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0);
	spin_unlock(cinfo.lock);

	dreq->count = 0;
	get_dreq(dreq);

	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;

	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
		if (!nfs_pageio_add_request(&desc, req)) {
			nfs_list_add_request(req, &failed);
			spin_lock(cinfo.lock);
			dreq->flags = 0;
			dreq->error = -EIO;
			spin_unlock(cinfo.lock);
		}
	}
	nfs_pageio_complete(&desc);

	/* unlock and release each request that could not be resent */
	while (!list_empty(&failed)) {
		req = nfs_list_entry(failed.next);
		nfs_list_remove_request(req);
		nfs_unlock_request(req);
		nfs_release_request(req);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
}

static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
	struct nfs_direct_req *dreq = data->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int status = data->task.tk_status;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	if (status < 0) {
		dprintk("NFS: %5u commit failed with error %d.\n",
			data->task.tk_pid, status);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
		dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}

	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
			/* Note the rewrite will go through mds */
			nfs_mark_request_commit(req, NULL, &cinfo);
		} else
			nfs_release_request(req);
		nfs_unlock_request(req);
	}

	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
		nfs_direct_write_complete(dreq, data->inode);
}

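/*
 * Summary of the recovery decision above:
 *
 *	COMMIT result	verifier vs. dreq->verf	action
 *	status < 0	not checked		reschedule writes via MDS
 *	status >= 0	differs			reschedule writes via MDS
 *	status >= 0	matches			release requests; done
 *
 * A changed write verifier means the server restarted between the
 * UNSTABLE writes and this COMMIT, so the data may never have reached
 * stable storage and must be re-sent.
 */
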
static void nfs_direct_error_cleanup(struct nfs_inode *nfsi)
{
	/* There is no lock to clear */
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
	.completion = nfs_direct_commit_complete,
	.error_cleanup = nfs_direct_error_cleanup,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	int res;
	struct nfs_commit_info cinfo;
	LIST_HEAD(mds_list);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) /* res == -ENOMEM */
		nfs_direct_write_reschedule(dreq);
}

static void nfs_direct_write_schedule_work(struct work_struct *work)
{
	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
		case NFS_ODIRECT_DO_COMMIT:
			nfs_direct_commit_schedule(dreq);
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
			nfs_direct_write_reschedule(dreq);
			break;
		default:
			nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
			nfs_direct_complete(dreq);
	}
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
}

#else
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	nfs_zap_mapping(inode, inode->i_mapping);
	nfs_direct_complete(dreq);
}
#endif

/*
 * NB: Return the value of the first error return code. Subsequent
 * errors after the first one are ignored.
 */
/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation. If get_user_pages() fails, or the page vector can't be
 * allocated, bail and stop sending more writes. Write length accounting
 * is handled by nfs_direct_write_completion(). Otherwise, if no
 * requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
						 const struct iovec *iov,
						 loff_t pos)
{
	struct nfs_direct_req *dreq = desc->pg_dreq;
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	size_t wsize = NFS_SERVER(inode)->wsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;
	struct page **pagevec = NULL;
	unsigned int npages;

	do {
		size_t bytes;
		int i;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(max(wsize, PAGE_SIZE), count);

		result = -ENOMEM;
		npages = nfs_page_array_len(pgbase, bytes);
		if (!pagevec)
			pagevec = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
		if (!pagevec)
			break;

		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					npages, 0, 0, pagevec, NULL);
		up_read(&current->mm->mmap_sem);
		if (result < 0)
			break;

		if ((unsigned)result < npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(pagevec, result);
				break;
			}
			bytes -= pgbase;
			npages = result;
		}

		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min(bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, dreq->inode,
						 pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			nfs_lock_request(req);
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(desc, req)) {
				result = desc->pg_error;
				nfs_unlock_request(req);
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			started += req_len;
			user_addr += req_len;
			pos += req_len;
			count -= req_len;
		}
		/* The nfs_page structures now hold references to these pages */
		nfs_direct_release_pages(pagevec, npages);
	} while (count != 0 && result >= 0);

	kfree(pagevec);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}

static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_commit_info cinfo;
	int bit = -1;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);

	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
		dreq->flags = 0;
		dreq->error = hdr->error;
	}
	if (dreq->error != 0)
		bit = NFS_IOHDR_ERROR;
	else {
		dreq->count += hdr->good_bytes;
		if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			bit = NFS_IOHDR_NEED_RESCHED;
		} else if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
				bit = NFS_IOHDR_NEED_RESCHED;
			else if (dreq->flags == 0) {
				memcpy(&dreq->verf, &req->wb_verf,
				       sizeof(dreq->verf));
				bit = NFS_IOHDR_NEED_COMMIT;
				dreq->flags = NFS_ODIRECT_DO_COMMIT;
			} else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
				if (memcmp(&dreq->verf, &req->wb_verf, sizeof(dreq->verf))) {
					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
					bit = NFS_IOHDR_NEED_RESCHED;
				} else
					bit = NFS_IOHDR_NEED_COMMIT;
			}
		}
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		switch (bit) {
		case NFS_IOHDR_NEED_RESCHED:
		case NFS_IOHDR_NEED_COMMIT:
			nfs_mark_request_commit(req, hdr->lseg, &cinfo);
			break;
		default:
			nfs_release_request(req);
		}
		nfs_unlock_request(req);
	}

out_put:
	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, hdr->inode);
	hdr->release(hdr);
}

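/*
 * The dreq->flags transitions driven above, per completed WRITE
 * (a summary of the code, not new behavior):
 *
 *	current flags		reply state		new flags
 *	any			NEED_RESCHED		RESCHED_WRITES
 *	0			NEED_COMMIT		DO_COMMIT (verifier saved)
 *	DO_COMMIT		NEED_COMMIT, verf same	DO_COMMIT
 *	DO_COMMIT		NEED_COMMIT, verf diff	RESCHED_WRITES
 *	RESCHED_WRITES		NEED_COMMIT		RESCHED_WRITES
 *
 * Once the last reference is put, nfs_direct_write_schedule_work()
 * sends a COMMIT for NFS_ODIRECT_DO_COMMIT, or re-sends the writes
 * for NFS_ODIRECT_RESCHED_WRITES.
 */
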
static void nfs_write_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
		nfs_unlock_request(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
};

static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       const struct iovec *iov,
					       unsigned long nr_segs,
					       loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	unsigned long seg;

	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_COND_STABLE,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_write_schedule_segment(&desc, vec, pos);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}
	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
	return 0;
}

static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos,
				size_t count)
{
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
	if (dreq->l_ctx == NULL)
		goto out_release;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos);
	if (!result)
		result = nfs_direct_wait(dreq);
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers into which to read data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file. For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change. Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_read(count);

	retval = nfs_direct_read(iocb, iov, nr_segs, pos);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers from which to write data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size. The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache. We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_write(count);

	retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
	if (retval > 0) {
		struct inode *inode = mapping->host;

		iocb->ki_pos = pos + retval;
		spin_lock(&inode->i_lock);
		if (i_size_read(inode) < iocb->ki_pos)
			i_size_write(inode, iocb->ki_pos);
		spin_unlock(&inode->i_lock);
	}
out:
	return retval;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}