Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
[deliverable/linux.git] / fs / nfs / file.c
1 /*
2 * linux/fs/nfs/file.c
3 *
4 * Copyright (C) 1992 Rick Sladkey
5 *
6 * Changes Copyright (C) 1994 by Florian La Roche
7 * - Do not copy data too often around in the kernel.
8 * - In nfs_file_read the return value of kmalloc wasn't checked.
9 * - Put in a better version of read look-ahead buffering. Original idea
10 * and implementation by Wai S Kok elekokws@ee.nus.sg.
11 *
12 * Expire cache on write to a file by Wai S Kok (Oct 1994).
13 *
14 * Total rewrite of read side for new NFS buffer cache.. Linus.
15 *
16 * nfs regular file handling functions
17 */
18
19 #include <linux/time.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/fcntl.h>
23 #include <linux/stat.h>
24 #include <linux/nfs_fs.h>
25 #include <linux/nfs_mount.h>
26 #include <linux/mm.h>
27 #include <linux/slab.h>
28 #include <linux/pagemap.h>
29 #include <linux/smp_lock.h>
30 #include <linux/aio.h>
31
32 #include <asm/uaccess.h>
33 #include <asm/system.h>
34
35 #include "delegation.h"
36 #include "iostat.h"
37
38 #define NFSDBG_FACILITY NFSDBG_FILE
39
40 static int nfs_file_open(struct inode *, struct file *);
41 static int nfs_file_release(struct inode *, struct file *);
42 static loff_t nfs_file_llseek(struct file *file, loff_t offset, int origin);
43 static int nfs_file_mmap(struct file *, struct vm_area_struct *);
44 static ssize_t nfs_file_sendfile(struct file *, loff_t *, size_t, read_actor_t, void *);
45 static ssize_t nfs_file_read(struct kiocb *, const struct iovec *iov,
46 unsigned long nr_segs, loff_t pos);
47 static ssize_t nfs_file_write(struct kiocb *, const struct iovec *iov,
48 unsigned long nr_segs, loff_t pos);
49 static int nfs_file_flush(struct file *, fl_owner_t id);
50 static int nfs_fsync(struct file *, struct dentry *dentry, int datasync);
51 static int nfs_check_flags(int flags);
52 static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
53 static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
54
/*
 * VFS file operations for regular NFS files.  Synchronous read/write
 * go through the generic do_sync_* wrappers, which call back into the
 * aio_read/aio_write methods defined below.
 */
const struct file_operations nfs_file_operations = {
	.llseek		= nfs_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= nfs_file_read,
	.aio_write	= nfs_file_write,
	.mmap		= nfs_file_mmap,
	.open		= nfs_file_open,
	.flush		= nfs_file_flush,
	.release	= nfs_file_release,
	.fsync		= nfs_fsync,
	.lock		= nfs_lock,
	.flock		= nfs_flock,
	.sendfile	= nfs_file_sendfile,
	.check_flags	= nfs_check_flags,
};
71
/* Inode operations for NFS regular files (no extended attributes). */
const struct inode_operations nfs_file_inode_operations = {
	.permission	= nfs_permission,
	.getattr	= nfs_getattr,
	.setattr	= nfs_setattr,
};
77
#ifdef CONFIG_NFS_V3
/* NFSv3 variant: adds the extended-attribute operations on top of the
 * basic permission/getattr/setattr set. */
const struct inode_operations nfs3_file_inode_operations = {
	.permission	= nfs_permission,
	.getattr	= nfs_getattr,
	.setattr	= nfs_setattr,
	.listxattr	= nfs3_listxattr,
	.getxattr	= nfs3_getxattr,
	.setxattr	= nfs3_setxattr,
	.removexattr	= nfs3_removexattr,
};
#endif  /* CONFIG_NFS_V3 */
89
90 /* Hack for future NFS swap support */
91 #ifndef IS_SWAPFILE
92 # define IS_SWAPFILE(inode) (0)
93 #endif
94
/*
 * nfs_check_flags - reject unsupported open-flag combinations
 *
 * NFS refuses to open a file with both O_APPEND and O_DIRECT set;
 * such opens fail with -EINVAL.  All other flag combinations pass.
 */
static int nfs_check_flags(int flags)
{
	const int incompatible = O_APPEND | O_DIRECT;

	return ((flags & incompatible) == incompatible) ? -EINVAL : 0;
}
102
103 /*
104 * Open file
105 */
106 static int
107 nfs_file_open(struct inode *inode, struct file *filp)
108 {
109 int res;
110
111 res = nfs_check_flags(filp->f_flags);
112 if (res)
113 return res;
114
115 nfs_inc_stats(inode, NFSIOS_VFSOPEN);
116 lock_kernel();
117 res = NFS_PROTO(inode)->file_open(inode, filp);
118 unlock_kernel();
119 return res;
120 }
121
122 static int
123 nfs_file_release(struct inode *inode, struct file *filp)
124 {
125 /* Ensure that dirty pages are flushed out with the right creds */
126 if (filp->f_mode & FMODE_WRITE)
127 filemap_fdatawrite(filp->f_mapping);
128 nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
129 return NFS_PROTO(inode)->file_release(inode, filp);
130 }
131
132 /**
133 * nfs_revalidate_size - Revalidate the file size
134 * @inode - pointer to inode struct
135 * @file - pointer to struct file
136 *
137 * Revalidates the file length. This is basically a wrapper around
138 * nfs_revalidate_inode() that takes into account the fact that we may
139 * have cached writes (in which case we don't care about the server's
140 * idea of what the file length is), or O_DIRECT (in which case we
141 * shouldn't trust the cache).
142 */
143 static int nfs_revalidate_file_size(struct inode *inode, struct file *filp)
144 {
145 struct nfs_server *server = NFS_SERVER(inode);
146 struct nfs_inode *nfsi = NFS_I(inode);
147
148 if (server->flags & NFS_MOUNT_NOAC)
149 goto force_reval;
150 if (filp->f_flags & O_DIRECT)
151 goto force_reval;
152 if (nfsi->npages != 0)
153 return 0;
154 if (!(nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE) && !nfs_attribute_timeout(inode))
155 return 0;
156 force_reval:
157 return __nfs_revalidate_inode(server, inode);
158 }
159
160 static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
161 {
162 /* origin == SEEK_END => we must revalidate the cached file length */
163 if (origin == SEEK_END) {
164 struct inode *inode = filp->f_mapping->host;
165 int retval = nfs_revalidate_file_size(inode, filp);
166 if (retval < 0)
167 return (loff_t)retval;
168 }
169 return remote_llseek(filp, offset, origin);
170 }
171
/*
 * Flush all dirty pages, and check for write errors.
 *
 * Called by the VFS on each close() of a file descriptor.  For writable
 * descriptors this writes back everything and reports any asynchronous
 * write error recorded on the open context, so the error reaches the
 * application's close() exactly once.
 */
static int
nfs_file_flush(struct file *file, fl_owner_t id)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	int status;

	dfprintk(VFS, "nfs: flush(%s/%ld)\n", inode->i_sb->s_id, inode->i_ino);

	/* Read-only descriptors have nothing to flush */
	if ((file->f_mode & FMODE_WRITE) == 0)
		return 0;
	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
	lock_kernel();
	/* Ensure that data+attribute caches are up to date after close() */
	status = nfs_wb_all(inode);
	if (!status) {
		/* Consume (and clear) any async write error on the context */
		status = ctx->error;
		ctx->error = 0;
		if (!status)
			nfs_revalidate_inode(NFS_SERVER(inode), inode);
	}
	unlock_kernel();
	return status;
}
200
/*
 * Buffered read entry point (aio_read).  Revalidates the page cache
 * against the server before falling through to the generic read path.
 * O_DIRECT descriptors bypass the page cache entirely.
 */
static ssize_t
nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct dentry * dentry = iocb->ki_filp->f_path.dentry;
	struct inode * inode = dentry->d_inode;
	ssize_t result;
	size_t count = iov_length(iov, nr_segs);

#ifdef CONFIG_NFS_DIRECTIO
	if (iocb->ki_filp->f_flags & O_DIRECT)
		return nfs_file_direct_read(iocb, iov, nr_segs, pos);
#endif

	dfprintk(VFS, "nfs: read(%s/%s, %lu@%lu)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		(unsigned long) count, (unsigned long) pos);

	/* Make sure cached pages are still valid before serving them */
	result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
	/* NOTE(review): the byte count is added to the read statistics even
	 * when revalidation failed and no data is read — confirm intended. */
	nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, count);
	if (!result)
		result = generic_file_aio_read(iocb, iov, nr_segs, pos);
	return result;
}
225
226 static ssize_t
227 nfs_file_sendfile(struct file *filp, loff_t *ppos, size_t count,
228 read_actor_t actor, void *target)
229 {
230 struct dentry *dentry = filp->f_path.dentry;
231 struct inode *inode = dentry->d_inode;
232 ssize_t res;
233
234 dfprintk(VFS, "nfs: sendfile(%s/%s, %lu@%Lu)\n",
235 dentry->d_parent->d_name.name, dentry->d_name.name,
236 (unsigned long) count, (unsigned long long) *ppos);
237
238 res = nfs_revalidate_mapping(inode, filp->f_mapping);
239 if (!res)
240 res = generic_file_sendfile(filp, ppos, count, actor, target);
241 return res;
242 }
243
244 static int
245 nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
246 {
247 struct dentry *dentry = file->f_path.dentry;
248 struct inode *inode = dentry->d_inode;
249 int status;
250
251 dfprintk(VFS, "nfs: mmap(%s/%s)\n",
252 dentry->d_parent->d_name.name, dentry->d_name.name);
253
254 status = nfs_revalidate_mapping(inode, file->f_mapping);
255 if (!status)
256 status = generic_file_mmap(file, vma);
257 return status;
258 }
259
/*
 * Flush any dirty pages for this process, and check for write errors.
 * The return status from this call provides a reliable indication of
 * whether any write errors occurred for this process.
 */
static int
nfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode *inode = dentry->d_inode;
	int status;

	dfprintk(VFS, "nfs: fsync(%s/%ld)\n", inode->i_sb->s_id, inode->i_ino);

	nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
	lock_kernel();
	/* Write back and commit every dirty page on the inode */
	status = nfs_wb_all(inode);
	if (!status) {
		/* Surface (and reset) any asynchronous write error recorded
		 * on the open context, so it is reported exactly once. */
		status = ctx->error;
		ctx->error = 0;
	}
	unlock_kernel();
	return status;
}
284
/*
 * This does the "real" work of the write. The generic routine has
 * allocated the page, locked it, done all the page alignment stuff
 * calculations etc. Now we should just copy the data from user
 * space and write it back to the real medium..
 *
 * If the writer ends up delaying the write, the writer needs to
 * increment the page use counts until he is done with the page.
 */
static int nfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
	/* Flush writes made under a different open context before data from
	 * this context is copied into the page. */
	return nfs_flush_incompatible(file, page);
}
298
/*
 * Second half of the buffered write path: mark the bytes the caller
 * just copied into the page as dirty/pending via nfs_updatepage().
 *
 * Fix: the status variable was declared "long" although both
 * nfs_updatepage() and this function's return type are "int", silently
 * narrowing on return; use int throughout for type consistency.
 */
static int nfs_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
	int status;

	lock_kernel();
	status = nfs_updatepage(file, page, offset, to-offset);
	unlock_kernel();
	return status;
}
308
309 static void nfs_invalidate_page(struct page *page, unsigned long offset)
310 {
311 if (offset != 0)
312 return;
313 /* Cancel any unstarted writes on this page */
314 nfs_wb_page_priority(page->mapping->host, page, FLUSH_INVALIDATE);
315 }
316
static int nfs_release_page(struct page *page, gfp_t gfp)
{
	/* If PagePrivate() is set, then the page is not freeable */
	/* NOTE(review): this always answers "not freeable"; as written the
	 * gfp argument is ignored — confirm no best-effort release is wanted. */
	return 0;
}
322
323 static int nfs_launder_page(struct page *page)
324 {
325 return nfs_wb_page(page->mapping->host, page);
326 }
327
/*
 * Address-space operations for NFS regular files.  The buffered write
 * path goes through prepare_write/commit_write; direct_IO is compiled
 * in only when CONFIG_NFS_DIRECTIO is enabled.
 */
const struct address_space_operations nfs_file_aops = {
	.readpage = nfs_readpage,
	.readpages = nfs_readpages,
	.set_page_dirty = nfs_set_page_dirty,
	.writepage = nfs_writepage,
	.writepages = nfs_writepages,
	.prepare_write = nfs_prepare_write,
	.commit_write = nfs_commit_write,
	.invalidatepage = nfs_invalidate_page,
	.releasepage = nfs_release_page,
#ifdef CONFIG_NFS_DIRECTIO
	.direct_IO = nfs_direct_IO,
#endif
	.launder_page = nfs_launder_page,
};
343
/*
 * Buffered write entry point (aio_write).  Rejects writes to active
 * swap files, revalidates the file length for O_APPEND, and hands the
 * data to the generic write path; for O_SYNC/IS_SYNC targets the data
 * is synced afterwards and any sync error replaces the byte count.
 * O_DIRECT descriptors bypass the page cache entirely.
 */
static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	struct dentry * dentry = iocb->ki_filp->f_path.dentry;
	struct inode * inode = dentry->d_inode;
	ssize_t result;
	size_t count = iov_length(iov, nr_segs);

#ifdef CONFIG_NFS_DIRECTIO
	if (iocb->ki_filp->f_flags & O_DIRECT)
		return nfs_file_direct_write(iocb, iov, nr_segs, pos);
#endif

	dfprintk(VFS, "nfs: write(%s/%s(%ld), %lu@%Ld)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		inode->i_ino, (unsigned long) count, (long long) pos);

	/* Writing to an active swap file is refused outright */
	result = -EBUSY;
	if (IS_SWAPFILE(inode))
		goto out_swapfile;
	/*
	 * O_APPEND implies that we must revalidate the file length.
	 */
	if (iocb->ki_filp->f_flags & O_APPEND) {
		result = nfs_revalidate_file_size(inode, iocb->ki_filp);
		if (result)
			goto out;
	}

	/* Zero-length writes succeed immediately */
	result = count;
	if (!count)
		goto out;

	nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
	result = generic_file_aio_write(iocb, iov, nr_segs, pos);
	/* Return error values for O_SYNC and IS_SYNC() */
	if (result >= 0 && (IS_SYNC(inode) || (iocb->ki_filp->f_flags & O_SYNC))) {
		int err = nfs_fsync(iocb->ki_filp, dentry, 1);
		if (err < 0)
			result = err;
	}
out:
	return result;

out_swapfile:
	printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
	goto out;
}
392
/*
 * Handle F_GETLK: test for a conflicting lock.  Local state is checked
 * first; the server is only consulted when it could hold a conflict.
 */
static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	int status = 0;

	lock_kernel();
	/* Try local locking first */
	if (posix_test_lock(filp, fl)) {
		/* A local conflict was found; fl now describes it */
		goto out;
	}

	/* Holding a read delegation: no other client can hold a conflict */
	if (nfs_have_delegation(inode, FMODE_READ))
		goto out_noconflict;

	/* "-onolock" mounts never ask the server */
	if (NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM)
		goto out_noconflict;

	status = NFS_PROTO(inode)->lock(filp, cmd, fl);
out:
	unlock_kernel();
	return status;
out_noconflict:
	/* Report "no conflicting lock" to the caller */
	fl->fl_type = F_UNLCK;
	goto out;
}
418
/*
 * Record a lock in the local VFS lock tables only.  Used on "-onolock"
 * mounts and to mirror server-side lock state locally.
 */
static int do_vfs_lock(struct file *file, struct file_lock *fl)
{
	int res = 0;
	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
		case FL_POSIX:
			res = posix_lock_file_wait(file, fl);
			break;
		case FL_FLOCK:
			res = flock_lock_file_wait(file, fl);
			break;
		default:
			/* Neither POSIX nor flock: the caller passed a bogus lock */
			BUG();
	}
	if (res < 0)
		dprintk(KERN_WARNING "%s: VFS is out of sync with lock manager"
			" - error %d!\n",
				__FUNCTION__, res);
	return res;
}
438
/*
 * Release a lock, either on the server (via the lock manager) or
 * locally for "-onolock" mounts.
 */
static int do_unlk(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	int status;

	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	nfs_sync_mapping(filp->f_mapping);

	/* NOTE: special case
	 * If we're signalled while cleaning up locks on process exit, we
	 * still need to complete the unlock.
	 */
	lock_kernel();
	/* Use local locking if mounted with "-onolock" */
	if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
	else
		status = do_vfs_lock(filp, fl);
	unlock_kernel();
	return status;
}
463
/*
 * Acquire a lock, either on the server (via the lock manager) or
 * locally for "-onolock" mounts.  A successful acquisition also acts
 * as a cache-coherency point: the page cache is flushed and zapped.
 */
static int do_setlk(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	int status;

	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	status = nfs_sync_mapping(filp->f_mapping);
	if (status != 0)
		goto out;

	lock_kernel();
	/* Use local locking if mounted with "-onolock" */
	if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM)) {
		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
		/* If we were signalled we still need to ensure that
		 * we clean up any state on the server. We therefore
		 * record the lock call as having succeeded in order to
		 * ensure that locks_remove_posix() cleans it out when
		 * the process exits.
		 */
		if (status == -EINTR || status == -ERESTARTSYS)
			do_vfs_lock(filp, fl);
	} else
		status = do_vfs_lock(filp, fl);
	unlock_kernel();
	if (status < 0)
		goto out;
	/*
	 * Make sure we clear the cache whenever we try to get the lock.
	 * This makes locking act as a cache coherency point.
	 */
	nfs_sync_mapping(filp->f_mapping);
	nfs_zap_caches(inode);
out:
	return status;
}
503
/*
 * Lock a (portion of) a file: VFS ->lock entry point (POSIX locks).
 * Dispatches to do_getlk/do_unlk/do_setlk depending on the command.
 */
static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode * inode = filp->f_mapping->host;

	dprintk("NFS: nfs_lock(f=%s/%ld, t=%x, fl=%x, r=%Ld:%Ld)\n",
			inode->i_sb->s_id, inode->i_ino,
			fl->fl_type, fl->fl_flags,
			(long long)fl->fl_start, (long long)fl->fl_end);
	nfs_inc_stats(inode, NFSIOS_VFSLOCK);

	/* No mandatory locks over NFS */
	/* (setgid set with group-execute clear marks mandatory locking) */
	if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
	    fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (IS_GETLK(cmd))
		return do_getlk(filp, cmd, fl);
	/* Unlock requests are handled separately: they must complete even
	 * if the process is being signalled on exit */
	if (fl->fl_type == F_UNLCK)
		return do_unlk(filp, cmd, fl);
	return do_setlk(filp, cmd, fl);
}
528
/*
 * Lock an entire file: VFS ->flock entry point (BSD flock semantics,
 * emulated over the whole file using POSIX locks on the server).
 */
static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
{
	dprintk("NFS: nfs_flock(f=%s/%ld, t=%x, fl=%x)\n",
			filp->f_path.dentry->d_inode->i_sb->s_id,
			filp->f_path.dentry->d_inode->i_ino,
			fl->fl_type, fl->fl_flags);

	/*
	 * No BSD flocks over NFS allowed.
	 * Note: we could try to fake a POSIX lock request here by
	 * using ((u32) filp | 0x80000000) or some such as the pid.
	 * Not sure whether that would be unique, though, or whether
	 * that would break in other places.
	 */
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	/* We're simulating flock() locks using posix locks on the server */
	fl->fl_owner = (fl_owner_t)filp;	/* file pointer stands in as owner */
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;		/* cover the whole file */

	if (fl->fl_type == F_UNLCK)
		return do_unlk(filp, cmd, fl);
	return do_setlk(filp, cmd, fl);
}
This page took 0.04275 seconds and 6 git commands to generate.