[GFS2] Use ->page_mkwrite() for mmap()
fs/gfs2/ops_file.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/ext2_fs.h>
#include <linux/crc32.h>
#include <linux/lm_interface.h>
#include <linux/writeback.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "eaops.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	if (origin == SEEK_END) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = remote_llseek(file, offset, origin);
			gfs2_glock_dq_uninit(&i_gh);
		}
	} else
		error = remote_llseek(file, offset, origin);

	return error;
}
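
/*
 * Illustrative user-space sketch (the path is hypothetical): SEEK_END is
 * the one case above that takes a glock, because another node may have
 * extended the file and the local size must be brought up to date first.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/mnt/gfs2/file", O_RDONLY);
 *	off_t end = lseek(fd, 0, SEEK_END);	// shared glock taken here
 */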

/**
 * gfs2_readdir - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	u64 offset = file->f_pos;
	int error;

	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, GL_ATIME, &d_gh);
	error = gfs2_glock_nq_atime(&d_gh);
	if (error) {
		gfs2_holder_uninit(&d_gh);
		return error;
	}

	error = gfs2_dir_read(dir, &offset, dirent, filldir);

	gfs2_glock_dq_uninit(&d_gh);

	file->f_pos = offset;

	return error;
}

/**
 * fsflags_cvt - convert a bitmap of flags through a translation table
 * @table: A table of 32 u32 flags
 * @val: a 32 bit value to convert
 *
 * This function can be used to convert between fsflags values and
 * GFS2's own flags values.
 *
 * Returns: the converted flags
 */
static u32 fsflags_cvt(const u32 *table, u32 val)
{
	u32 res = 0;
	while (val) {
		if (val & 1)
			res |= *table;
		table++;
		val >>= 1;
	}
	return res;
}

static const u32 fsflags_to_gfs2[32] = {
	[3] = GFS2_DIF_SYNC,
	[4] = GFS2_DIF_IMMUTABLE,
	[5] = GFS2_DIF_APPENDONLY,
	[7] = GFS2_DIF_NOATIME,
	[12] = GFS2_DIF_EXHASH,
	[14] = GFS2_DIF_INHERIT_JDATA,
	[20] = GFS2_DIF_INHERIT_DIRECTIO,
};

static const u32 gfs2_to_fsflags[32] = {
	[gfs2fl_Sync] = FS_SYNC_FL,
	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
	[gfs2fl_AppendOnly] = FS_APPEND_FL,
	[gfs2fl_NoAtime] = FS_NOATIME_FL,
	[gfs2fl_ExHash] = FS_INDEX_FL,
	[gfs2fl_InheritDirectio] = FS_DIRECTIO_FL,
	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};
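
/*
 * Worked example (illustrative, not part of the driver): FS_SYNC_FL is
 * bit 3 and FS_NOATIME_FL is bit 7 of the fsflags word, matching the
 * [3] and [7] entries of fsflags_to_gfs2 above, so:
 *
 *	u32 val = fsflags_cvt(fsflags_to_gfs2,
 *			      FS_SYNC_FL | FS_NOATIME_FL);
 *	// val == (GFS2_DIF_SYNC | GFS2_DIF_NOATIME)
 */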

static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
	error = gfs2_glock_nq_atime(&gh);
	if (error) {
		gfs2_holder_uninit(&gh);
		return error;
	}

	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_di.di_flags);
	if (!S_ISDIR(inode->i_mode)) {
		if (ip->i_di.di_flags & GFS2_DIF_JDATA)
			fsflags |= FS_JOURNAL_DATA_FL;
		if (ip->i_di.di_flags & GFS2_DIF_DIRECTIO)
			fsflags |= FS_DIRECTIO_FL;
	}
	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
	return error;
}

void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_dinode_host *di = &ip->i_di;
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
	if (di->di_flags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (di->di_flags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (di->di_flags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (di->di_flags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_DIRECTIO|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_SYSTEM|			\
			     GFS2_DIF_INHERIT_DIRECTIO|		\
			     GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: the file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 * Returns: errno
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	flags = ip->i_di.di_flags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = permission(inode, MAY_WRITE, NULL);
		if (error)
			goto out;
	}

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	ip->i_di.di_flags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	u32 fsflags, gfsflags;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
	if (!S_ISDIR(inode->i_mode)) {
		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
		if (gfsflags & GFS2_DIF_INHERIT_DIRECTIO)
			gfsflags ^= (GFS2_DIF_DIRECTIO | GFS2_DIF_INHERIT_DIRECTIO);
		return do_gfs2_set_flags(filp, gfsflags, ~0);
	}
	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
}

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	}
	return -ENOTTY;
}
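
/*
 * Illustrative user-space sketch (the path is hypothetical):
 * FS_IOC_GETFLAGS/FS_IOC_SETFLAGS are the same ioctls used by
 * lsattr(1) and chattr(1), so marking a file append-only goes
 * through gfs2_get_flags()/gfs2_set_flags() above:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int fd = open("/mnt/gfs2/file", O_RDONLY);
 *	unsigned int flags;
 *	ioctl(fd, FS_IOC_GETFLAGS, &flags);	// -> gfs2_get_flags()
 *	flags |= FS_APPEND_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &flags);	// -> gfs2_set_flags()
 */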

/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's ok too.
 */

static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
	unsigned long size = PAGE_CACHE_SIZE;
	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(inode, lblock, 1, &bh);
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while (size > 0);
	return 0;
}

/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @page: The page which is about to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned long last_index;
	u64 pos = (u64)page->index << PAGE_CACHE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	int alloc_required = 0;
	struct gfs2_holder gh;
	struct gfs2_alloc *al;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME, &gh);
	ret = gfs2_glock_nq_atime(&gh);
	if (ret)
		goto out;

	set_bit(GIF_SW_PAGED, &ip->i_flags);
	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
	ret = gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE, &alloc_required);
	if (ret || !alloc_required)
		goto out_unlock;

	ip->i_alloc.al_requested = 0;
	al = gfs2_alloc_get(ip);
	ret = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (ret)
		goto out_alloc_put;
	ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
	if (ret)
		goto out_quota_unlock;
	al->al_requested = data_blocks + ind_blocks;
	ret = gfs2_inplace_reserve(ip);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	last_index = ip->i_inode.i_size >> PAGE_CACHE_SHIFT;
	if (page->index > last_index)
		goto out_unlock_page;
	if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping)
		goto out_unlock_page;
	if (gfs2_is_stuffed(ip)) {
		ret = gfs2_unstuff_dinode(ip, page);
		if (ret)
			goto out_unlock_page;
	}
	ret = gfs2_allocate_page_backing(page);

out_unlock_page:
	unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_alloc_put:
	gfs2_alloc_put(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	return ret;
}

static struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = gfs2_page_mkwrite,
};

/**
 * gfs2_mmap - set up a memory mapping of a file
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * Returns: 0 or error code
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
	error = gfs2_glock_nq_atime(&i_gh);
	if (error) {
		gfs2_holder_uninit(&i_gh);
		return error;
	}

	vma->vm_ops = &gfs2_vm_ops;

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
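
/*
 * Illustrative user-space sketch (the path is hypothetical): the first
 * write through a MAP_SHARED mapping faults the page writable, at which
 * point the VM calls gfs2_page_mkwrite() via gfs2_vm_ops so that backing
 * blocks can be allocated before the write proceeds:
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	int fd = open("/mnt/gfs2/file", O_RDWR);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	p[0] = 1;	// write fault -> gfs2_page_mkwrite()
 */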

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	struct gfs2_file *fp;
	int error;

	fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			goto fail;

		if (!(file->f_flags & O_LARGEFILE) &&
		    ip->i_di.di_size > MAX_NON_LFS) {
			error = -EOVERFLOW;
			goto fail_gunlock;
		}

		/* Listen to the Direct I/O flag */

		if (ip->i_di.di_flags & GFS2_DIF_DIRECTIO)
			file->f_flags |= O_DIRECT;

		gfs2_glock_dq_uninit(&i_gh);
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	file->private_data = NULL;
	kfree(fp);
	return error;
}

/**
 * gfs2_close - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_close(struct inode *inode, struct file *file)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_file *fp;

	fp = file->private_data;
	file->private_data = NULL;

	if (gfs2_assert_warn(sdp, fp))
		return -EIO;

	kfree(fp);

	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry (we ignore this)
 * @dentry: the dentry that points to the inode to sync
 * @datasync: non-zero if this is an fdatasync() call
 *
 * The VFS will flush "normal" data for us. We only need to worry
 * about metadata here. For journaled data, we just do a log flush
 * as we can't avoid it. Otherwise we can just bail out if datasync
 * is set. For stuffed inodes we must flush the log in order to
 * ensure that all data is on disk.
 *
 * The call to write_inode_now() is there to write back metadata and
 * the inode itself. It does also try and write the data, but that's
 * (hopefully) a no-op due to the VFS having already called filemap_fdatawrite()
 * for us.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
	int ret = 0;

	if (gfs2_is_jdata(GFS2_I(inode))) {
		gfs2_log_flush(GFS2_SB(inode), GFS2_I(inode)->i_gl);
		return 0;
	}

	if (sync_state != 0) {
		if (!datasync)
			ret = write_inode_now(inode, 0);

		if (gfs2_is_stuffed(GFS2_I(inode)))
			gfs2_log_flush(GFS2_SB(inode), GFS2_I(inode)->i_gl);
	}

	return ret;
}
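
/*
 * Illustrative note: fdatasync(2) arrives here with datasync == 1, so for
 * a non-journaled, non-stuffed file the write_inode_now() metadata
 * writeback above is skipped entirely:
 *
 *	#include <unistd.h>
 *
 *	write(fd, buf, len);	// fd, buf, len as set up by the caller
 *	fdatasync(fd);		// datasync == 1: data only, if possible
 */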

/**
 * gfs2_setlease - acquire/release a file lease
 * @file: the file pointer
 * @arg: lease type
 * @fl: file lock
 *
 * Returns: errno
 */

static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
{
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);

	/*
	 * We don't currently have a way to enforce a lease across the whole
	 * cluster; until we do, disable leases (by just returning -EINVAL),
	 * unless the administrator has requested purely local locking.
	 */
	if (!sdp->sd_args.ar_localflocks)
		return -EINVAL;
	return generic_setlease(file, arg, fl);
}

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockname name =
		{ .ln_number = ip->i_no_addr,
		  .ln_type = LM_TYPE_PLOCK };

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode))
		return -ENOLCK;

	if (sdp->sd_args.ar_localflocks) {
		if (IS_GETLK(cmd)) {
			posix_test_lock(file, fl);
			return 0;
		} else {
			return posix_lock_file_wait(file, fl);
		}
	}

	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (IS_GETLK(cmd))
		return gfs2_lm_plock_get(sdp, &name, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return gfs2_lm_punlock(sdp, &name, file, fl);
	else
		return gfs2_lm_plock(sdp, &name, file, cmd, fl);
}
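
/*
 * Illustrative user-space sketch (the path is hypothetical): POSIX locks
 * made with fcntl(2) land in gfs2_lock() and, in a clustered mount, are
 * forwarded to the lock manager via gfs2_lm_plock() and friends:
 *
 *	#include <fcntl.h>
 *
 *	struct flock lk = {
 *		.l_type   = F_WRLCK,	// exclusive (write) lock
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		// zero length == whole file
 *	};
 *	int fd = open("/mnt/gfs2/file", O_RDWR);
 *	fcntl(fd, F_SETLKW, &lk);	// wait for the cluster-wide plock
 */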

static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file->f_path.dentry->d_inode);
	struct gfs2_glock *gl;
	unsigned int state;
	int flags;
	int error = 0;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE
		| GL_FLOCK;

	mutex_lock(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		if (fl_gh->gh_state == state)
			goto out;
		flock_lock_file_wait(file,
				     &(struct file_lock){.fl_type = F_UNLCK});
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode),
				       ip->i_no_addr, &gfs2_flock_glops,
				       CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
	error = gfs2_glock_nq(fl_gh);
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = flock_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}

static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	flock_lock_file_wait(file, fl);
	if (fl_gh->gh_gl)
		gfs2_glock_dq_uninit(fl_gh);
	mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);

	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode))
		return -ENOLCK;

	if (sdp->sd_args.ar_localflocks)
		return flock_lock_file_wait(file, fl);

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}
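
/*
 * Illustrative user-space sketch (the path is hypothetical): flock(2)
 * requests map onto glocks through do_flock()/do_unflock() above, so an
 * exclusive flock really is exclusive across the whole cluster unless
 * the filesystem is mounted with localflocks:
 *
 *	#include <fcntl.h>
 *	#include <sys/file.h>
 *
 *	int fd = open("/mnt/gfs2/file", O_RDWR);
 *	flock(fd, LOCK_EX);	// LM_ST_EXCLUSIVE glock, cluster-wide
 *	// ... do work under the lock ...
 *	flock(fd, LOCK_UN);	// do_unflock() drops the glock
 */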

const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_close,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.setlease	= gfs2_setlease,
};

const struct file_operations gfs2_dir_fops = {
	.readdir	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_close,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
};
