nfs: Use UNSTABLE + COMMIT for NFS O_DIRECT writes
fs/nfs/direct.c
/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */
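
/*
 * Illustration only (not part of the original file): a minimal
 * userspace sketch of the uncached access pattern described above.
 * The path name is hypothetical and error handling is omitted; with
 * O_DIRECT the buffer, file offset, and transfer size must satisfy
 * the alignment rules of the underlying mount.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		void *buf;
 *		int fd = open("/mnt/nfs/datafile", O_WRONLY | O_DIRECT);
 *
 *		posix_memalign(&buf, 4096, 4096);	// page-aligned buffer
 *		memset(buf, 'x', 4096);
 *		write(fd, buf, 4096);	// bypasses the page cache; held on
 *					// stable storage before returning
 *		close(fd);
 *		free(buf);
 *		return 0;
 *	}
 */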

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp_lock.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>

#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty);

static kmem_cache_t *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct list_head	list,		/* nfs_read/write_data structs */
				rewrite_list;	/* saved nfs_write_data structs */
	struct file *		filp;		/* file descriptor */
	struct kiocb *		iocb;		/* controlling i/o request */
	wait_queue_head_t	wait;		/* wait for i/o completion */
	struct inode *		inode;		/* target file of i/o */
	unsigned long		user_addr;	/* location of user's buffer */
	size_t			user_count;	/* total bytes to move */
	loff_t			pos;		/* starting offset in file */
	struct page **		pages;		/* pages in our buffer */
	unsigned int		npages;		/* count of pages */

	/* completion state */
	spinlock_t		lock;		/* protect completion state */
	int			outstanding;	/* i/os we're waiting for */
	ssize_t			count,		/* bytes actually processed */
				error;		/* any reported error */

	/* commit state */
	struct nfs_write_data *	commit_data;	/* special write_data for commits */
	int			flags;
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};
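
/*
 * Summary of the dreq->flags state machine for direct writes (added
 * commentary, not in the original source): writes go out UNSTABLE
 * first.  If every WRITE reply comes back NFS_FILE_SYNC, the data is
 * already durable and flags stays 0, so no COMMIT is sent.  If any
 * reply is unstable, flags moves to NFS_ODIRECT_DO_COMMIT and one
 * COMMIT covering the whole request follows.  If the verifiers in the
 * WRITE replies disagree, or the COMMIT's verifier fails to match,
 * flags moves to NFS_ODIRECT_RESCHED_WRITES and every WRITE is resent
 * with FLUSH_STABLE.
 */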

static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync);
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
	struct dentry *dentry = iocb->ki_filp->f_dentry;

	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
			dentry->d_name.name, (long long) pos, nr_segs);

	return -EINVAL;
}

static inline int nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages)
{
	int result = -ENOMEM;
	unsigned long page_count;
	size_t array_size;

	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	page_count -= user_addr >> PAGE_SHIFT;

	array_size = (page_count * sizeof(struct page *));
	*pages = kmalloc(array_size, GFP_KERNEL);
	if (*pages) {
		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					page_count, (rw == READ), 0,
					*pages, NULL);
		up_read(&current->mm->mmap_sem);
		/*
		 * If we got fewer pages than expected from
		 * get_user_pages(), the user buffer runs off the
		 * end of a mapping; return EFAULT.
		 */
		if (result >= 0 && result < page_count) {
			nfs_free_user_pages(*pages, result, 0);
			*pages = NULL;
			result = -EFAULT;
		}
	}
	return result;
}
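
/*
 * Worked example of the page counting above (added commentary): with
 * PAGE_SIZE 4096, user_addr 0x10ff0, and size 0x30, the buffer spans
 * two pages: (0x10ff0 + 0x30 + 0xfff) >> 12 = 0x12, minus
 * 0x10ff0 >> 12 = 0x10, gives page_count = 2.
 */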

static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
{
	int i;
	for (i = 0; i < npages; i++) {
		struct page *page = pages[i];
		if (do_dirty && !PageCompound(page))
			set_page_dirty_lock(page);
		page_cache_release(page);
	}
	kfree(pages);
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	init_waitqueue_head(&dreq->wait);
	INIT_LIST_HEAD(&dreq->list);
	INIT_LIST_HEAD(&dreq->rewrite_list);
	dreq->iocb = NULL;
	spin_lock_init(&dreq->lock);
	dreq->outstanding = 0;
	dreq->count = 0;
	dreq->error = 0;
	dreq->flags = 0;

	return dreq;
}

static void nfs_direct_req_release(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_event_interruptible(dreq->wait, (dreq->outstanding == 0));

	if (!result)
		result = dreq->error;
	if (!result)
		result = dreq->count;

out:
	kref_put(&dreq->kref, nfs_direct_req_release);
	return (ssize_t) result;
}

/*
 * We must hold a reference to all the pages in this direct read request
 * until the RPCs complete.  This could be long *after* we are woken up in
 * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
 *
 * In addition, synchronous I/O uses a stack-allocated iocb.  Thus we
 * can't trust the iocb is still valid here if this is a synchronous
 * request.  If the waiter is woken prematurely, the iocb is long gone.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	nfs_free_user_pages(dreq->pages, dreq->npages, 1);

	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (!res)
			res = (long) dreq->count;
		aio_complete(dreq->iocb, res, 0);
	} else
		wake_up(&dreq->wait);

	iput(dreq->inode);
	kref_put(&dreq->kref, nfs_direct_req_release);
}

/*
 * Note we also set the number of requests we have in the dreq when we are
 * done.  This prevents races with I/O completion so we will always wait
 * until all requests have been dispatched and completed.
 */
static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
{
	struct list_head *list;
	struct nfs_direct_req *dreq;
	unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		return NULL;

	list = &dreq->list;
	for(;;) {
		struct nfs_read_data *data = nfs_readdata_alloc(rpages);

		if (unlikely(!data)) {
			while (!list_empty(list)) {
				data = list_entry(list->next,
						  struct nfs_read_data, pages);
				list_del(&data->pages);
				nfs_readdata_free(data);
			}
			kref_put(&dreq->kref, nfs_direct_req_release);
			return NULL;
		}

		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, list);

		data->req = (struct nfs_page *) dreq;
		dreq->outstanding++;
		if (nbytes <= rsize)
			break;
		nbytes -= rsize;
	}
	kref_get(&dreq->kref);
	return dreq;
}

static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	if (nfs_readpage_result(task, data) != 0)
		return;

	spin_lock(&dreq->lock);

	if (likely(task->tk_status >= 0))
		dreq->count += data->res.count;
	else
		dreq->error = task->tk_status;

	if (--dreq->outstanding) {
		spin_unlock(&dreq->lock);
		return;
	}

	spin_unlock(&dreq->lock);
	nfs_direct_complete(dreq);
}

static const struct rpc_call_ops nfs_read_direct_ops = {
	.rpc_call_done = nfs_direct_read_result,
	.rpc_release = nfs_readdata_release,
};

/*
 * For each nfs_read_data struct that was allocated on the list, dispatch
 * an NFS READ operation
 */
static void nfs_direct_read_schedule(struct nfs_direct_req *dreq)
{
	struct file *file = dreq->filp;
	struct inode *inode = file->f_mapping->host;
	struct nfs_open_context *ctx = (struct nfs_open_context *)
			file->private_data;
	struct list_head *list = &dreq->list;
	struct page **pages = dreq->pages;
	size_t count = dreq->user_count;
	loff_t pos = dreq->pos;
	size_t rsize = NFS_SERVER(inode)->rsize;
	unsigned int curpage, pgbase;

	curpage = 0;
	pgbase = dreq->user_addr & ~PAGE_MASK;
	do {
		struct nfs_read_data *data;
		size_t bytes;

		bytes = rsize;
		if (count < rsize)
			bytes = count;

		data = list_entry(list->next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
		data->args.pages = &pages[curpage];
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.eof = 0;
		data->res.count = bytes;

		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
				&nfs_read_direct_ops, data);
		NFS_PROTO(inode)->read_setup(data);

		data->task.tk_cookie = (unsigned long) inode;

		lock_kernel();
		rpc_execute(&data->task);
		unlock_kernel();

		dfprintk(VFS, "NFS: %4d initiated direct read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);

		pos += bytes;
		pgbase += bytes;
		curpage += pgbase >> PAGE_SHIFT;
		pgbase &= ~PAGE_MASK;

		count -= bytes;
	} while (count != 0);
}

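/*
 * Added commentary on the cursor arithmetic in the loop above: pgbase
 * is the offset into the current page and curpage indexes the pages[]
 * array.  With 4K pages, pgbase = 0xf00 and bytes = 0x400, pgbase
 * grows to 0x1300, curpage advances by 0x1300 >> 12 = 1, and pgbase
 * is masked back to 0x300, the offset where the next READ's data
 * lands in the following page.
 */
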
static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, unsigned int nr_pages)
{
	ssize_t result;
	sigset_t oldset;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize);
	if (!dreq)
		return -ENOMEM;

	dreq->user_addr = user_addr;
	dreq->user_count = count;
	dreq->pos = pos;
	dreq->pages = pages;
	dreq->npages = nr_pages;
	igrab(inode);
	dreq->inode = inode;
	dreq->filp = iocb->ki_filp;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
	rpc_clnt_sigmask(clnt, &oldset);
	nfs_direct_read_schedule(dreq);
	result = nfs_direct_wait(dreq);
	rpc_clnt_sigunmask(clnt, &oldset);

	return result;
}

static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
{
	list_splice_init(&dreq->rewrite_list, &dreq->list);
	while (!list_empty(&dreq->list)) {
		struct nfs_write_data *data = list_entry(dreq->list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_release(data);
	}
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct list_head *pos;

	list_splice_init(&dreq->rewrite_list, &dreq->list);
	list_for_each(pos, &dreq->list)
		dreq->outstanding++;
	dreq->count = 0;

	nfs_direct_write_schedule(dreq, FLUSH_STABLE);
}

static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;
	if (unlikely(task->tk_status < 0)) {
		dreq->error = task->tk_status;
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}
	if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
		dprintk("NFS: %5u commit verify failed\n", task->tk_pid);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}

	dprintk("NFS: %5u commit returned %d\n", task->tk_pid, task->tk_status);
	nfs_direct_write_complete(dreq, data->inode);
}

static const struct rpc_call_ops nfs_commit_direct_ops = {
	.rpc_call_done = nfs_direct_commit_result,
	.rpc_release = nfs_commit_release,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	struct file *file = dreq->filp;
	struct nfs_open_context *ctx = (struct nfs_open_context *)
			file->private_data;
	struct nfs_write_data *data = dreq->commit_data;
	struct rpc_task *task = &data->task;

	data->inode = dreq->inode;
	data->cred = ctx->cred;

	data->args.fh = NFS_FH(data->inode);
	data->args.offset = dreq->pos;
	data->args.count = dreq->user_count;
	data->res.count = 0;
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;

	rpc_init_task(&data->task, NFS_CLIENT(dreq->inode), RPC_TASK_ASYNC,
				&nfs_commit_direct_ops, data);
	NFS_PROTO(data->inode)->commit_setup(data, 0);

	data->task.tk_priority = RPC_PRIORITY_NORMAL;
	data->task.tk_cookie = (unsigned long)data->inode;
	/* Note: task.tk_ops->rpc_release will free dreq->commit_data */
	dreq->commit_data = NULL;

	dprintk("NFS: %5u initiated commit call\n", task->tk_pid);

	lock_kernel();
	rpc_execute(&data->task);
	unlock_kernel();
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
		case NFS_ODIRECT_DO_COMMIT:
			nfs_direct_commit_schedule(dreq);
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
			nfs_direct_write_reschedule(dreq);
			break;
		default:
			nfs_end_data_update(inode);
			if (dreq->commit_data != NULL)
				nfs_commit_free(dreq->commit_data);
			nfs_direct_free_writedata(dreq);
			nfs_direct_complete(dreq);
	}
}

static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
	dreq->commit_data = nfs_commit_alloc(0);
	if (dreq->commit_data != NULL)
		dreq->commit_data->req = (struct nfs_page *) dreq;
}
#else
static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
	dreq->commit_data = NULL;
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	nfs_end_data_update(inode);
	nfs_direct_free_writedata(dreq);
	nfs_direct_complete(dreq);
}
#endif

static struct nfs_direct_req *nfs_direct_write_alloc(size_t nbytes, size_t wsize)
{
	struct list_head *list;
	struct nfs_direct_req *dreq;
	unsigned int wpages = (wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		return NULL;

	list = &dreq->list;
	for(;;) {
		struct nfs_write_data *data = nfs_writedata_alloc(wpages);

		if (unlikely(!data)) {
			while (!list_empty(list)) {
				data = list_entry(list->next,
						  struct nfs_write_data, pages);
				list_del(&data->pages);
				nfs_writedata_free(data);
			}
			kref_put(&dreq->kref, nfs_direct_req_release);
			return NULL;
		}

		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, list);

		data->req = (struct nfs_page *) dreq;
		dreq->outstanding++;
		if (nbytes <= wsize)
			break;
		nbytes -= wsize;
	}

	nfs_alloc_commit_data(dreq);

	kref_get(&dreq->kref);
	return dreq;
}

static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
	int status = task->tk_status;

	if (nfs_writeback_done(task, data) != 0)
		return;

	spin_lock(&dreq->lock);

	if (likely(status >= 0))
		dreq->count += data->res.count;
	else
		dreq->error = task->tk_status;

	if (data->res.verf->committed != NFS_FILE_SYNC) {
		switch (dreq->flags) {
			case 0:
				memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
				dreq->flags = NFS_ODIRECT_DO_COMMIT;
				break;
			case NFS_ODIRECT_DO_COMMIT:
				if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
					dprintk("NFS: %5u write verify failed\n", task->tk_pid);
					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
				}
		}
	}
	/* In case we have to resend */
	data->args.stable = NFS_FILE_SYNC;

	spin_unlock(&dreq->lock);
}
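
/*
 * Added commentary: data->res.verf->committed reports how the server
 * handled each WRITE.  NFS_FILE_SYNC means the data already sits on
 * stable storage.  Anything weaker obliges the client to send a
 * COMMIT later, and the verifier returned in each reply must match
 * across all replies and the eventual COMMIT; a mismatch suggests the
 * server rebooted and may have lost uncommitted data, so the writes
 * are rescheduled, this time asking for NFS_FILE_SYNC directly.
 */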

/*
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
static void nfs_direct_write_release(void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	spin_lock(&dreq->lock);
	if (--dreq->outstanding) {
		spin_unlock(&dreq->lock);
		return;
	}
	spin_unlock(&dreq->lock);

	nfs_direct_write_complete(dreq, data->inode);
}

static const struct rpc_call_ops nfs_write_direct_ops = {
	.rpc_call_done = nfs_direct_write_result,
	.rpc_release = nfs_direct_write_release,
};

/*
 * For each nfs_write_data struct that was allocated on the list, dispatch
 * an NFS WRITE operation
 */
static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync)
{
	struct file *file = dreq->filp;
	struct inode *inode = file->f_mapping->host;
	struct nfs_open_context *ctx = (struct nfs_open_context *)
			file->private_data;
	struct list_head *list = &dreq->list;
	struct page **pages = dreq->pages;
	size_t count = dreq->user_count;
	loff_t pos = dreq->pos;
	size_t wsize = NFS_SERVER(inode)->wsize;
	unsigned int curpage, pgbase;

	curpage = 0;
	pgbase = dreq->user_addr & ~PAGE_MASK;
	do {
		struct nfs_write_data *data;
		size_t bytes;

		bytes = wsize;
		if (count < wsize)
			bytes = count;

		data = list_entry(list->next, struct nfs_write_data, pages);
		list_move_tail(&data->pages, &dreq->rewrite_list);

		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
		data->args.pages = &pages[curpage];
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.count = bytes;
		data->res.verf = &data->verf;

		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
				&nfs_write_direct_ops, data);
		NFS_PROTO(inode)->write_setup(data, sync);

		data->task.tk_priority = RPC_PRIORITY_NORMAL;
		data->task.tk_cookie = (unsigned long) inode;

		lock_kernel();
		rpc_execute(&data->task);
		unlock_kernel();

		dfprintk(VFS, "NFS: %4d initiated direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);

		pos += bytes;
		pgbase += bytes;
		curpage += pgbase >> PAGE_SHIFT;
		pgbase &= ~PAGE_MASK;

		count -= bytes;
	} while (count != 0);
}

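/*
 * Added commentary: the function below opts out of UNSTABLE + COMMIT
 * when it cannot pay off.  If no commit_data could be allocated, or
 * the request is smaller than a single wsize-sized WRITE (one RPC),
 * the write is sent FLUSH_STABLE up front, which is cheaper than a
 * separate COMMIT round trip.
 */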
static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, int nr_pages)
{
	ssize_t result;
	sigset_t oldset;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_direct_req *dreq;
	size_t wsize = NFS_SERVER(inode)->wsize;
	int sync = 0;

	dreq = nfs_direct_write_alloc(count, wsize);
	if (!dreq)
		return -ENOMEM;
	if (dreq->commit_data == NULL || count < wsize)
		sync = FLUSH_STABLE;

	dreq->user_addr = user_addr;
	dreq->user_count = count;
	dreq->pos = pos;
	dreq->pages = pages;
	dreq->npages = nr_pages;
	igrab(inode);
	dreq->inode = inode;
	dreq->filp = iocb->ki_filp;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	nfs_add_stats(inode, NFSIOS_DIRECTWRITTENBYTES, count);

	nfs_begin_data_update(inode);

	rpc_clnt_sigmask(clnt, &oldset);
	nfs_direct_write_schedule(dreq, sync);
	result = nfs_direct_wait(dreq);
	rpc_clnt_sigunmask(clnt, &oldset);

	return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer into which to read data
 * @count: number of bytes to read
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
{
	ssize_t retval = -EINVAL;
	int page_count;
	struct page **pages;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;

	dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	if ((ssize_t) count < 0)
		goto out;
	retval = -EFAULT;
	if (!access_ok(VERIFY_WRITE, buf, count))
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	page_count = nfs_get_user_pages(READ, (unsigned long) buf,
						count, &pages);
	if (page_count < 0) {
		nfs_free_user_pages(pages, 0, 0);
		retval = page_count;
		goto out;
	}

	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos,
						pages, page_count);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer from which to write data
 * @count: number of bytes to write
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We also avoid an unnecessary invocation of generic_osync_inode(),
 * as it is fairly meaningless to sync the metadata of an NFS file.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
{
	ssize_t retval;
	int page_count;
	struct page **pages;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;

	dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = -EFAULT;
	if (!access_ok(VERIFY_READ, buf, count))
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	page_count = nfs_get_user_pages(WRITE, (unsigned long) buf,
						count, &pages);
	if (page_count < 0) {
		nfs_free_user_pages(pages, 0, 0);
		retval = page_count;
		goto out;
	}

	retval = nfs_direct_write(iocb, (unsigned long) buf, count,
					pos, pages, page_count);

	/*
	 * XXX: nfs_end_data_update() already ensures this file's
	 *      cached data is subsequently invalidated.  Do we really
	 *      need to call invalidate_inode_pages2() again here?
	 *
	 *      For aio writes, this invalidation will almost certainly
	 *      occur before the writes complete.  Kind of racey.
	 */
	if (mapping->nrpages)
		invalidate_inode_pages2(mapping);

	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, SLAB_RECLAIM_ACCOUNT,
						NULL, NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	if (kmem_cache_destroy(nfs_direct_cachep))
		printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
}