/*
 * linux/fs/nfs/write.c
 *
 * Writing file data over NFS.
 *
 * We do it like this: When a (user) process wishes to write data to an
 * NFS file, a write request is allocated that contains the RPC task data
 * plus some info on the page to be written, and added to the inode's
 * write chain. If the process writes past the end of the page, an async
 * RPC call to write the page is scheduled immediately; otherwise, the call
 * is delayed for a few seconds.
 *
 * Just like readahead, no async I/O is performed if wsize < PAGE_SIZE.
 *
 * Write requests are kept on the inode's writeback list. Each entry in
 * that list references the page (portion) to be written. When the
 * cache timeout has expired, the RPC task is woken up, and tries to
 * lock the page. As soon as it manages to do so, the request is moved
 * from the writeback list to the writelock list.
 *
 * Note: we must make sure never to confuse the inode passed in the
 * write_page request with the one in page->inode. As far as I understand
 * it, these are different when doing a swap-out.
 *
 * To understand everything that goes on here and in the NFS read code,
 * one should be aware that a page is locked in exactly one of the following
 * cases:
 *
 *  - A write request is in progress.
 *  - A user process is in generic_file_write/nfs_update_page
 *  - A user process is in generic_file_read
 *
 * Also note that because of the way pages are invalidated in
 * nfs_revalidate_inode, the following assertions hold:
 *
 *  - If a page is dirty, there will be no read requests (a page will
 *    not be re-read unless invalidated by nfs_revalidate_inode).
 *  - If the page is not uptodate, there will be no pending write
 *    requests, and no process will be in nfs_update_page.
 *
 * FIXME: Interaction with the vmscan routines is not optimal yet.
 * Either vmscan must be made nfs-savvy, or we need a different page
 * reclaim concept that supports something like FS-independent
 * buffer_heads with a b_ops-> field.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */
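
/*
 * Rough map of the write path below (an editorial summary derived from
 * this file, not part of the original source):
 *
 *   nfs_updatepage()                 write(2) path: create or extend an
 *     -> nfs_update_request()        nfs_page request for the page and
 *     -> nfs_mark_request_dirty()    queue it on the inode's dirty list
 *
 *   nfs_writepage()/nfs_writepages() VM writeback path:
 *     -> nfs_flush_inode()           scan the dirty list and turn requests
 *     -> nfs_flush_list()            into WRITE RPCs; nfs_flush_one() sends
 *                                    whole pages, nfs_flush_multi() splits
 *                                    a page when wsize < PAGE_CACHE_SIZE
 *
 *   nfs_commit_inode()               NFSv3/v4 only: COMMIT data that the
 *                                    server acknowledged only as unstable
 */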

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mpage.h>
#include <linux/writeback.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <asm/uaccess.h>
#include <linux/smp_lock.h>

#include "delegation.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

/*
 * Local function declarations
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context*,
					    struct inode *,
					    struct page *,
					    unsigned int, unsigned int);
static int nfs_wait_on_write_congestion(struct address_space *, int);
static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
			   unsigned int npages, int how);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static kmem_cache_t *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;

static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);

struct nfs_write_data *nfs_commit_alloc(unsigned int pagecount)
{
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_commit_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

void nfs_commit_free(struct nfs_write_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_commit_mempool);
}
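
/*
 * Editorial note on the allocators above and below: the nfs_write_data
 * itself comes from a mempool, while the page vector is embedded
 * (p->page_array) for small I/Os and kmalloc'ed only when pagecount
 * exceeds ARRAY_SIZE(p->page_array). Callers must therefore release with
 * the matching free routine, which checks whether pagevec points at the
 * embedded array, e.g.:
 *
 *	struct nfs_write_data *p = nfs_commit_alloc(n);
 *	...
 *	nfs_commit_free(p);	(never a bare kfree(p))
 */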

struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
{
	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

void nfs_writedata_free(struct nfs_write_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_wdata_mempool);
}

void nfs_writedata_release(void *wdata)
{
	nfs_writedata_free(wdata);
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size = i_size_read(inode);
	unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

	if (i_size > 0 && page->index < end_index)
		return;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
	if (i_size >= end)
		return;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
	i_size_write(inode, end);
}
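
/*
 * Worked example (assuming PAGE_CACHE_SIZE == 4096): a write of
 * count == 200 at offset == 100 into page->index == 2 ends at
 * end == (2 << 12) + 100 + 200 == 8492. With i_size == 8000 the page is
 * not below end_index ((8000 - 1) >> 12 == 1), and 8492 > 8000, so
 * i_size is advanced to 8492.
 */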

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	loff_t end_offs;

	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count == PAGE_CACHE_SIZE) {
		SetPageUptodate(page);
		return;
	}

	end_offs = i_size_read(page->mapping->host) - 1;
	if (end_offs < 0)
		return;
	/* Is this the last page? */
	if (page->index != (unsigned long)(end_offs >> PAGE_CACHE_SHIFT))
		return;
	/* This is the last page: set PG_uptodate if we cover the entire
	 * extent of the data, then zero the rest of the page.
	 */
	if (count == (unsigned int)(end_offs & (PAGE_CACHE_SIZE - 1)) + 1) {
		memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
		SetPageUptodate(page);
	}
}
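
/*
 * Worked example (assuming PAGE_CACHE_SIZE == 4096): with i_size == 10000,
 * end_offs == 9999, so the last page has index 9999 >> 12 == 2 and holds
 * (9999 & 4095) + 1 == 1808 valid bytes. A request with base == 0 and
 * count == 1808 on that page covers all of its data; the remaining
 * 4096 - 1808 bytes are zeroed and the page may be marked uptodate.
 */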

/*
 * Write a page synchronously.
 * Offset is the data offset within the page.
 */
static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page, unsigned int offset, unsigned int count,
		int how)
{
	unsigned int wsize = NFS_SERVER(inode)->wsize;
	int result, written = 0;
	struct nfs_write_data *wdata;

	wdata = nfs_writedata_alloc(1);
	if (!wdata)
		return -ENOMEM;

	wdata->flags = how;
	wdata->cred = ctx->cred;
	wdata->inode = inode;
	wdata->args.fh = NFS_FH(inode);
	wdata->args.context = ctx;
	wdata->args.pages = &page;
	wdata->args.stable = NFS_FILE_SYNC;
	wdata->args.pgbase = offset;
	wdata->args.count = wsize;
	wdata->res.fattr = &wdata->fattr;
	wdata->res.verf = &wdata->verf;

	dprintk("NFS: nfs_writepage_sync(%s/%Ld %d@%Ld)\n",
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count, (long long)(page_offset(page) + offset));

	set_page_writeback(page);
	nfs_begin_data_update(inode);
	do {
		if (count < wsize)
			wdata->args.count = count;
		wdata->args.offset = page_offset(page) + wdata->args.pgbase;

		result = NFS_PROTO(inode)->write(wdata);

		if (result < 0) {
			/* Must mark the page invalid after I/O error */
			ClearPageUptodate(page);
			goto io_error;
		}
		if (result < wdata->args.count)
			printk(KERN_WARNING "NFS: short write, count=%u, result=%d\n",
					wdata->args.count, result);

		wdata->args.offset += result;
		wdata->args.pgbase += result;
		written += result;
		count -= result;
		nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, result);
	} while (count);
	/* Update file length */
	nfs_grow_file(page, offset, written);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, written);

	if (PageError(page))
		ClearPageError(page);

io_error:
	nfs_end_data_update(inode);
	end_page_writeback(page);
	nfs_writedata_free(wdata);
	return written ? written : result;
}
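
/*
 * Editorial note on the loop above: a WRITE reply may be short
 * (result < wdata->args.count). The loop compensates by advancing
 * args.offset and args.pgbase by the number of bytes actually written
 * and shrinking count; e.g. a 4096-byte write answered with
 * result == 1024 is reissued as a 3072-byte write at the old
 * offset + 1024, until count reaches 0.
 */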

static int nfs_writepage_async(struct nfs_open_context *ctx,
		struct inode *inode, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page *req;

	req = nfs_update_request(ctx, inode, page, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, count);
	nfs_unlock_request(req);
	return 0;
}

static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI;
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}

/*
 * Write an mmapped page to the server.
 */
int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	unsigned long end_index;
	unsigned offset = PAGE_CACHE_SIZE;
	loff_t i_size = i_size_read(inode);
	int inode_referenced = 0;
	int priority = wb_priority(wbc);
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

	/*
	 * Note: We need to ensure that we have a reference to the inode
	 *       if we are to do asynchronous writes. If not, waiting
	 *       in nfs_wait_on_request() may deadlock with clear_inode().
	 *
	 *       If igrab() fails here, then it is in any case safe to
	 *       call nfs_wb_page(), since there will be no pending writes.
	 */
	if (igrab(inode) != 0)
		inode_referenced = 1;
	end_index = i_size >> PAGE_CACHE_SHIFT;

	/* Ensure we've flushed out any previous writes */
	nfs_wb_page_priority(inode, page, priority);

	/* easy case */
	if (page->index < end_index)
		goto do_it;
	/* things got complicated... */
	offset = i_size & (PAGE_CACHE_SIZE-1);

	/* OK, are we completely out? */
	err = 0; /* potential race with truncate - ignore */
	if (page->index >= end_index+1 || !offset)
		goto out;
do_it:
	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
	if (ctx == NULL) {
		err = -EBADF;
		goto out;
	}
	lock_kernel();
	if (!IS_SYNC(inode) && inode_referenced) {
		err = nfs_writepage_async(ctx, inode, page, 0, offset);
		if (!wbc->for_writepages)
			nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
	} else {
		err = nfs_writepage_sync(ctx, inode, page, 0,
						offset, priority);
		if (err >= 0) {
			if (err != offset)
				redirty_page_for_writepage(wbc, page);
			err = 0;
		}
	}
	unlock_kernel();
	put_nfs_open_context(ctx);
out:
	unlock_page(page);
	if (inode_referenced)
		iput(inode);
	return err;
}

/*
 * Note: causes nfs_update_request() to block on the assumption
 *	 that the writeback is generated due to memory pressure.
 */
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct inode *inode = mapping->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	err = generic_writepages(mapping, wbc);
	if (err)
		return err;
	while (test_and_set_bit(BDI_write_congested, &bdi->state) != 0) {
		if (wbc->nonblocking)
			return 0;
		nfs_wait_on_write_congestion(mapping, 0);
	}
	err = nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
	if (err < 0)
		goto out;
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
	wbc->nr_to_write -= err;
	if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) {
		err = nfs_wait_on_requests(inode, 0, 0);
		if (err < 0)
			goto out;
	}
	err = nfs_commit_inode(inode, wb_priority(wbc));
	if (err > 0) {
		wbc->nr_to_write -= err;
		err = 0;
	}
out:
	clear_bit(BDI_write_congested, &bdi->state);
	wake_up_all(&nfs_write_congestion);
	return err;
}

/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error == -EEXIST);
	if (error)
		return error;
	if (!nfsi->npages) {
		igrab(inode);
		nfs_begin_data_update(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	nfsi->npages++;
	atomic_inc(&req->wb_count);
	return 0;
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON(!NFS_WBACK_BUSY(req));

	spin_lock(&nfsi->req_lock);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&nfsi->req_lock);
		nfs_end_data_update(inode);
		iput(inode);
	} else
		spin_unlock(&nfsi->req_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}

/*
 * Find a request
 */
static inline struct nfs_page *
_nfs_find_request(struct inode *inode, unsigned long index)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;

	req = (struct nfs_page*)radix_tree_lookup(&nfsi->nfs_page_tree, index);
	if (req)
		atomic_inc(&req->wb_count);
	return req;
}

static struct nfs_page *
nfs_find_request(struct inode *inode, unsigned long index)
{
	struct nfs_page *req;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	req = _nfs_find_request(inode, index);
	spin_unlock(&nfsi->req_lock);
	return req;
}

/*
 * Add a request to the inode's dirty list.
 */
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	radix_tree_tag_set(&nfsi->nfs_page_tree,
			req->wb_index, NFS_PAGE_TAG_DIRTY);
	nfs_list_add_request(req, &nfsi->dirty);
	nfsi->ndirty++;
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_FILE_DIRTY);
	mark_inode_dirty(inode);
}

/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
	return !list_empty(&req->wb_list) && req->wb_list_head == &nfsi->dirty;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	nfs_list_add_request(req, &nfsi->commit);
	nfsi->ncommit++;
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	mark_inode_dirty(inode);
}
#endif

/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	unsigned long idx_end, next;
	unsigned int res = 0;
	int error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	next = idx_start;
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
		if (req->wb_index > idx_end)
			break;

		next = req->wb_index + 1;
		BUG_ON(!NFS_WBACK_BUSY(req));

		atomic_inc(&req->wb_count);
		spin_unlock(&nfsi->req_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		spin_lock(&nfsi->req_lock);
		if (error < 0)
			return error;
		res++;
	}
	return res;
}
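
/*
 * Editorial note: the loop above must drop req_lock before sleeping in
 * nfs_wait_on_request(). It first bumps wb_count so the request cannot
 * be freed while the lock is dropped, and it resumes the radix-tree scan
 * at req->wb_index + 1 rather than relying on any state observed before
 * the lock was released.
 */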

static int nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int ret;

	spin_lock(&nfsi->req_lock);
	ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
	spin_unlock(&nfsi->req_lock);
	return ret;
}

static void nfs_cancel_dirty_list(struct list_head *head)
{
	struct nfs_page *req;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		nfs_clear_page_writeback(req);
	}
}

static void nfs_cancel_commit_list(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		nfs_clear_page_writeback(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	}
}

/*
 * nfs_scan_dirty - Scan an inode for dirty requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's dirty page list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ndirty != 0) {
		res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages);
		nfsi->ndirty -= res;
		if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
	}
	return res;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ncommit != 0) {
		res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
		nfsi->ncommit -= res;
		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
	}
	return res;
}
#else
static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	return 0;
}
#endif

static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	DEFINE_WAIT(wait);
	int ret = 0;

	might_sleep();

	if (!bdi_write_congested(bdi))
		return 0;

	nfs_inc_stats(mapping->host, NFSIOS_CONGESTIONWAIT);

	if (intr) {
		struct rpc_clnt *clnt = NFS_CLIENT(mapping->host);
		sigset_t oldset;

		rpc_clnt_sigmask(clnt, &oldset);
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_INTERRUPTIBLE);
		if (bdi_write_congested(bdi)) {
			if (signalled())
				ret = -ERESTARTSYS;
			else
				schedule();
		}
		rpc_clnt_sigunmask(clnt, &oldset);
	} else {
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_UNINTERRUPTIBLE);
		if (bdi_write_congested(bdi))
			schedule();
	}
	finish_wait(&nfs_write_congestion, &wait);
	return ret;
}


/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
		struct inode *inode, struct page *page,
		unsigned int offset, unsigned int bytes)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req, *new = NULL;
	unsigned long rqend, end;

	end = offset + bytes;

	if (nfs_wait_on_write_congestion(page->mapping, server->flags & NFS_MOUNT_INTR))
		return ERR_PTR(-ERESTARTSYS);
	for (;;) {
		/* Loop over all inode entries and see if we find
		 * a request for the page we wish to update
		 */
		spin_lock(&nfsi->req_lock);
		req = _nfs_find_request(inode, page->index);
		if (req) {
			if (!nfs_lock_request_dontget(req)) {
				int error;
				spin_unlock(&nfsi->req_lock);
				error = nfs_wait_on_request(req);
				nfs_release_request(req);
				if (error < 0) {
					if (new)
						nfs_release_request(new);
					return ERR_PTR(error);
				}
				continue;
			}
			spin_unlock(&nfsi->req_lock);
			if (new)
				nfs_release_request(new);
			break;
		}

		if (new) {
			int error;
			nfs_lock_request_dontget(new);
			error = nfs_inode_add_request(inode, new);
			if (error) {
				spin_unlock(&nfsi->req_lock);
				nfs_unlock_request(new);
				return ERR_PTR(error);
			}
			spin_unlock(&nfsi->req_lock);
			nfs_mark_request_dirty(new);
			return new;
		}
		spin_unlock(&nfsi->req_lock);

		new = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(new))
			return new;
	}

	/* We have a request for our page.
	 * If the creds don't match, or the page addresses don't match,
	 * tell the caller to wait on the conflicting request.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_context != ctx
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
		nfs_unlock_request(req);
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
		req->wb_bytes = rqend - req->wb_offset;
	}

	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;

	return req;
}
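
/*
 * Example of the region update above: if the existing dirty request
 * covers bytes 0-511 of the page (wb_offset == 0, wb_bytes == 512) and
 * the new write spans offset == 256, bytes == 768 (end == 1024), the
 * two ranges overlap, so the request is simply widened in place to
 * wb_offset == 0, wb_bytes == 1024 instead of allocating a second one.
 */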

int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;
	int status = 0;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	req = nfs_find_request(inode, page->index);
	if (req) {
		if (req->wb_page != page || ctx != req->wb_context)
			status = nfs_wb_page(inode, page);
		nfs_release_request(req);
	}
	return (status < 0) ? status : 0;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name, count,
		(long long)(page_offset(page) + offset));

	if (IS_SYNC(inode)) {
		status = nfs_writepage_sync(ctx, inode, page, offset, count, 0);
		if (status > 0) {
			if (offset == 0 && status == PAGE_CACHE_SIZE)
				SetPageUptodate(page);
			return 0;
		}
		return status;
	}

	/* If we're not using byte range locks, and we know the page
	 * is entirely in cache, it may be more efficient to avoid
	 * fragmenting write requests.
	 */
	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_flags & O_SYNC)) {
		loff_t end_offs = i_size_read(inode) - 1;
		unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;

		count += offset;
		offset = 0;
		if (unlikely(end_offs < 0)) {
			/* Do nothing */
		} else if (page->index == end_index) {
			unsigned int pglen;
			pglen = (unsigned int)(end_offs & (PAGE_CACHE_SIZE-1)) + 1;
			if (count < pglen)
				count = pglen;
		} else if (page->index < end_index)
			count = PAGE_CACHE_SIZE;
	}

	/*
	 * Try to find an NFS request corresponding to this page
	 * and update it.
	 * If the existing request cannot be updated, we must flush
	 * it out now.
	 */
	do {
		req = nfs_update_request(ctx, inode, page, offset, count);
		status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
		if (status != -EBUSY)
			break;
		/* Request could not be updated. Flush it out and try again */
		status = nfs_wb_page(inode, page);
	} while (status >= 0);
	if (status < 0)
		goto done;

	status = 0;

	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
	nfs_unlock_request(req);
done:
	dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
			status, (long long)i_size_read(inode));
	if (status < 0)
		ClearPageUptodate(page);
	return status;
}

static void nfs_writepage_release(struct nfs_page *req)
{
	end_page_writeback(req->wb_page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (!PageError(req->wb_page)) {
		if (NFS_NEED_RESCHED(req)) {
			nfs_mark_request_dirty(req);
			goto out;
		} else if (NFS_NEED_COMMIT(req)) {
			nfs_mark_request_commit(req);
			goto out;
		}
	}
	nfs_inode_remove_request(req);

out:
	nfs_clear_commit(req);
	nfs_clear_reschedule(req);
#else
	nfs_inode_remove_request(req);
#endif
	nfs_clear_page_writeback(req);
}

static inline int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages = data->pagevec;
	data->args.count = count;
	data->args.context = req->wb_context;

	data->res.fattr = &data->fattr;
	data->res.count = count;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->write_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);
}

static void nfs_execute_write(struct nfs_write_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	lock_kernel();
	rpc_execute(&data->task);
	unlock_kernel();
	rpc_clnt_sigunmask(clnt, &oldset);
}

/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	unsigned int wsize = NFS_SERVER(inode)->wsize;
	unsigned int nbytes, offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = req->wb_bytes;
	for (;;) {
		data = nfs_writedata_alloc(1);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		if (nbytes <= wsize)
			break;
		nbytes -= wsize;
	}
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	set_page_writeback(page);
	offset = 0;
	nbytes = req->wb_bytes;
	do {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes > wsize) {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					wsize, offset, how);
			offset += wsize;
			nbytes -= wsize;
		} else {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					nbytes, offset, how);
			nbytes = 0;
		}
		nfs_execute_write(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_free(data);
	}
	nfs_mark_request_dirty(req);
	nfs_clear_page_writeback(req);
	return -ENOMEM;
}
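
/*
 * Example: with PAGE_CACHE_SIZE == 4096 and wsize == 1024, a fully dirty
 * page is split into four WRITEs of 1024 bytes at page offsets 0, 1024,
 * 2048 and 3072. wb_complete counts the outstanding sub-requests, so the
 * page is released only when the last partial write completes (see
 * nfs_writeback_done_partial() below).
 */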

/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_write_data *data;
	unsigned int count;

	data = nfs_writedata_alloc(NFS_SERVER(inode)->wpages);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		set_page_writeback(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);

	nfs_execute_write(data);
	return 0;
out_bad:
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_dirty(req);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}

static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how)
{
	LIST_HEAD(one_request);
	int (*flush_one)(struct inode *, struct list_head *, int);
	struct nfs_page *req;
	int wpages = NFS_SERVER(inode)->wpages;
	int wsize = NFS_SERVER(inode)->wsize;
	int error;

	flush_one = nfs_flush_one;
	if (wsize < PAGE_CACHE_SIZE)
		flush_one = nfs_flush_multi;
	/* For single writes, FLUSH_STABLE is more efficient */
	if (npages <= wpages && npages == NFS_I(inode)->npages
			&& nfs_list_entry(head->next)->wb_bytes <= wsize)
		how |= FLUSH_STABLE;

	do {
		nfs_coalesce_requests(head, &one_request, wpages);
		req = nfs_list_entry(one_request.next);
		error = flush_one(inode, &one_request, how);
		if (error < 0)
			goto out_err;
	} while (!list_empty(head));
	return 0;
out_err:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_dirty(req);
		nfs_clear_page_writeback(req);
	}
	return error;
}

/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;

	dprintk("NFS: write (%s/%Ld %d@%Ld)",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_writeback_done(task, data) != 0)
		return;

	if (task->tk_status < 0) {
		ClearPageUptodate(page);
		SetPageError(page);
		req->wb_context->error = task->tk_status;
		dprintk(", error = %d\n", task->tk_status);
	} else {
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->verf.committed < NFS_FILE_SYNC) {
			if (!NFS_NEED_COMMIT(req)) {
				nfs_defer_commit(req);
				memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
				dprintk(" defer commit\n");
			} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
				nfs_defer_reschedule(req);
				dprintk(" server reboot detected\n");
			}
		} else
#endif
			dprintk(" OK\n");
	}

	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
}

static const struct rpc_call_ops nfs_write_partial_ops = {
	.rpc_call_done = nfs_writeback_done_partial,
	.rpc_release = nfs_writedata_release,
};

/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;
	struct page *page;

	if (nfs_writeback_done(task, data) != 0)
		return;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		page = req->wb_page;

		dprintk("NFS: write (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));

		if (task->tk_status < 0) {
			ClearPageUptodate(page);
			SetPageError(page);
			req->wb_context->error = task->tk_status;
			end_page_writeback(page);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}
		end_page_writeback(page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
		nfs_mark_request_commit(req);
		dprintk(" marked for commit\n");
#else
		nfs_inode_remove_request(req);
#endif
	next:
		nfs_clear_page_writeback(req);
	}
}

static const struct rpc_call_ops nfs_write_full_ops = {
	.rpc_call_done = nfs_writeback_done_full,
	.rpc_release = nfs_writedata_release,
};


/*
 * This function is called when the WRITE call is complete.
 */
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
	struct nfs_writeargs *argp = &data->args;
	struct nfs_writeres *resp = &data->res;
	int status;

	dprintk("NFS: %4d nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	status = NFS_PROTO(data->inode)->write_done(task, data);
	if (status != 0)
		return status;
	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long complain;

		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
			return -EAGAIN;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
					argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
	return 0;
}
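
/*
 * Example of the short-write handling above: if argp->count was 4096 but
 * the server wrote only resp->count == 1024 bytes of a stable write, the
 * arguments become offset += 1024, pgbase += 1024, count == 3072 and the
 * call is restarted. A short unstable write is instead resent in full as
 * NFS_FILE_SYNC, so we never have to track two verifiers for one request.
 */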


#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
void nfs_commit_release(void *wdata)
{
	nfs_commit_free(wdata);
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head,
		struct nfs_write_data *data,
		int how)
{
	struct nfs_page *first;
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);
	first = nfs_list_entry(data->pages.next);
	inode = first->wb_context->dentry->d_inode;

	data->inode = inode;
	data->cred = first->wb_context->cred;

	data->args.fh = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count = 0;
	data->res.count = 0;
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
	NFS_PROTO(inode)->commit_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated commit call\n", data->task.tk_pid);
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_write_data *data;
	struct nfs_page *req;

	data = nfs_commit_alloc(NFS_SERVER(inode)->wpages);

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_commit_rpcsetup(head, data, how);

	nfs_execute_write(data);
	return 0;
out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
		nfs_clear_page_writeback(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	}
	return -ENOMEM;
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;

	dprintk("NFS: %4d nfs_commit_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);

		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (task->tk_status < 0) {
			req->wb_context->error = task->tk_status;
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_mark_request_dirty(req);
	next:
		nfs_clear_page_writeback(req);
	}
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	return 0;
}
#endif

static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
		unsigned int npages, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_dirty(inode, &head, idx_start, npages);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_flush_list(inode, &head, res, how);
		if (error < 0)
			return error;
	}
	return res;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
int nfs_commit_inode(struct inode *inode, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
	}
	return res;
}
#endif

int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
		unsigned int npages, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int nocommit = how & FLUSH_NOCOMMIT;
	int pages, ret;

	how &= ~FLUSH_NOCOMMIT;
	spin_lock(&nfsi->req_lock);
	do {
		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
		if (ret != 0)
			continue;
		pages = nfs_scan_dirty(inode, &head, idx_start, npages);
		if (pages != 0) {
			spin_unlock(&nfsi->req_lock);
			if (how & FLUSH_INVALIDATE)
				nfs_cancel_dirty_list(&head);
			else
				ret = nfs_flush_list(inode, &head, pages, how);
			spin_lock(&nfsi->req_lock);
			continue;
		}
		if (nocommit)
			break;
		pages = nfs_scan_commit(inode, &head, idx_start, npages);
		if (pages == 0)
			break;
		if (how & FLUSH_INVALIDATE) {
			spin_unlock(&nfsi->req_lock);
			nfs_cancel_commit_list(&head);
			spin_lock(&nfsi->req_lock);
			continue;
		}
		pages += nfs_scan_commit(inode, &head, 0, 0);
		spin_unlock(&nfsi->req_lock);
		ret = nfs_commit_list(inode, &head, how);
		spin_lock(&nfsi->req_lock);
	} while (ret >= 0);
	spin_unlock(&nfsi->req_lock);
	return ret;
}

int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	return 0;
}
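
/*
 * Editorial note: MIN_POOL_WRITE and MIN_POOL_COMMIT reserve a minimum
 * number of preallocated write/commit structures in the mempools above,
 * so the writeback path can keep making progress (and thereby free
 * memory) even when the page allocator is under pressure.
 */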

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	if (kmem_cache_destroy(nfs_wdata_cachep))
		printk(KERN_INFO "nfs_write_data: not all structures were freed\n");
}