/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <asm/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
{
        struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
        struct gfs2_holder i_gh;
        loff_t error;

        switch (origin) {
        case SEEK_END: /* These reference inode->i_size */
        case SEEK_DATA:
        case SEEK_HOLE:
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
                                           &i_gh);
                if (!error) {
                        error = generic_file_llseek(file, offset, origin);
                        gfs2_glock_dq_uninit(&i_gh);
                }
                break;
        case SEEK_CUR:
        case SEEK_SET:
                error = generic_file_llseek(file, offset, origin);
                break;
        default:
                error = -EINVAL;
        }

        return error;
}
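
/*
 * Illustrative note: the SEEK_END case above matters for a call such as
 *
 *      lseek(fd, 0, SEEK_END);
 *
 * because i_size may have been grown by another node in the cluster;
 * holding the inode glock in LM_ST_SHARED refreshes the cached inode
 * before generic_file_llseek() reads the size.
 */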

/**
 * gfs2_readdir - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
{
        struct inode *dir = file->f_mapping->host;
        struct gfs2_inode *dip = GFS2_I(dir);
        struct gfs2_holder d_gh;
        u64 offset = file->f_pos;
        int error;

        gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
        error = gfs2_glock_nq(&d_gh);
        if (error) {
                gfs2_holder_uninit(&d_gh);
                return error;
        }

        error = gfs2_dir_read(dir, &offset, dirent, filldir, &file->f_ra);

        gfs2_glock_dq_uninit(&d_gh);

        file->f_pos = offset;

        return error;
}

/**
 * fsflags_cvt - convert between two flag bit layouts
 * @table: A table of 32 u32 flags
 * @val: a 32 bit value to convert
 *
 * This function can be used to convert between fsflags values and
 * GFS2's own flags values.
 *
 * Returns: the converted flags
 */
static u32 fsflags_cvt(const u32 *table, u32 val)
{
        u32 res = 0;

        while (val) {
                if (val & 1)
                        res |= *table;
                table++;
                val >>= 1;
        }
        return res;
}

static const u32 fsflags_to_gfs2[32] = {
        [3] = GFS2_DIF_SYNC,
        [4] = GFS2_DIF_IMMUTABLE,
        [5] = GFS2_DIF_APPENDONLY,
        [7] = GFS2_DIF_NOATIME,
        [12] = GFS2_DIF_EXHASH,
        [14] = GFS2_DIF_INHERIT_JDATA,
        [17] = GFS2_DIF_TOPDIR,
};

static const u32 gfs2_to_fsflags[32] = {
        [gfs2fl_Sync] = FS_SYNC_FL,
        [gfs2fl_Immutable] = FS_IMMUTABLE_FL,
        [gfs2fl_AppendOnly] = FS_APPEND_FL,
        [gfs2fl_NoAtime] = FS_NOATIME_FL,
        [gfs2fl_ExHash] = FS_INDEX_FL,
        [gfs2fl_TopLevel] = FS_TOPDIR_FL,
        [gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};

static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int error;
        u32 fsflags;

        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        error = gfs2_glock_nq(&gh);
        if (error)
                goto out_uninit;

        fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
        if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
                fsflags |= FS_JOURNAL_DATA_FL;
        if (put_user(fsflags, ptr))
                error = -EFAULT;

        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        return error;
}

void gfs2_set_inode_flags(struct inode *inode)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        unsigned int flags = inode->i_flags;

        flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
        if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
                inode->i_flags |= S_NOSEC;
        if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
                flags |= S_IMMUTABLE;
        if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
                flags |= S_APPEND;
        if (ip->i_diskflags & GFS2_DIF_NOATIME)
                flags |= S_NOATIME;
        if (ip->i_diskflags & GFS2_DIF_SYNC)
                flags |= S_SYNC;
        inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|                    \
                             GFS2_DIF_IMMUTABLE|                \
                             GFS2_DIF_APPENDONLY|               \
                             GFS2_DIF_NOATIME|                  \
                             GFS2_DIF_SYNC|                     \
                             GFS2_DIF_SYSTEM|                   \
                             GFS2_DIF_TOPDIR|                   \
                             GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct buffer_head *bh;
        struct gfs2_holder gh;
        int error;
        u32 new_flags, flags;

        error = mnt_want_write_file(filp);
        if (error)
                return error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        if (error)
                goto out_drop_write;

        error = -EACCES;
        if (!inode_owner_or_capable(inode))
                goto out;

        error = 0;
        flags = ip->i_diskflags;
        new_flags = (flags & ~mask) | (reqflags & mask);
        if ((new_flags ^ flags) == 0)
                goto out;

        error = -EINVAL;
        if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
                goto out;

        error = -EPERM;
        if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
                goto out;
        if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
                goto out;
        if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
            !capable(CAP_LINUX_IMMUTABLE))
                goto out;
        if (!IS_IMMUTABLE(inode)) {
                error = gfs2_permission(inode, MAY_WRITE);
                if (error)
                        goto out;
        }
        if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
                if (flags & GFS2_DIF_JDATA)
                        gfs2_log_flush(sdp, ip->i_gl);
                error = filemap_fdatawrite(inode->i_mapping);
                if (error)
                        goto out;
                error = filemap_fdatawait(inode->i_mapping);
                if (error)
                        goto out;
        }
        error = gfs2_trans_begin(sdp, RES_DINODE, 0);
        if (error)
                goto out;
        error = gfs2_meta_inode_buffer(ip, &bh);
        if (error)
                goto out_trans_end;
        gfs2_trans_add_bh(ip->i_gl, bh, 1);
        ip->i_diskflags = new_flags;
        gfs2_dinode_out(ip, bh->b_data);
        brelse(bh);
        gfs2_set_inode_flags(inode);
        gfs2_set_aops(inode);
out_trans_end:
        gfs2_trans_end(sdp);
out:
        gfs2_glock_dq_uninit(&gh);
out_drop_write:
        mnt_drop_write_file(filp);
        return error;
}

static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        u32 fsflags, gfsflags;

        if (get_user(fsflags, ptr))
                return -EFAULT;

        gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
        if (!S_ISDIR(inode->i_mode)) {
                gfsflags &= ~GFS2_DIF_TOPDIR;
                if (gfsflags & GFS2_DIF_INHERIT_JDATA)
                        gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
                return do_gfs2_set_flags(filp, gfsflags, ~0);
        }
        return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
}

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        switch(cmd) {
        case FS_IOC_GETFLAGS:
                return gfs2_get_flags(filp, (u32 __user *)arg);
        case FS_IOC_SETFLAGS:
                return gfs2_set_flags(filp, (u32 __user *)arg);
        case FITRIM:
                return gfs2_fitrim(filp, (void __user *)arg);
        }

        return -ENOTTY;
}

/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's ok too.
 */

static int gfs2_allocate_page_backing(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct buffer_head bh;
        unsigned long size = PAGE_CACHE_SIZE;
        u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

        do {
                bh.b_state = 0;
                bh.b_size = size;
                gfs2_block_map(inode, lblock, &bh, 1);
                if (!buffer_mapped(&bh))
                        return -EIO;
                size -= bh.b_size;
                lblock += (bh.b_size >> inode->i_blkbits);
        } while (size > 0);
        return 0;
}

/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @vmf: The virtual memory fault containing the page
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        unsigned long last_index;
        u64 pos = page->index << PAGE_CACHE_SHIFT;
        unsigned int data_blocks, ind_blocks, rblocks;
        struct gfs2_holder gh;
        loff_t size;
        int ret;

        /* Wait if fs is frozen. This is racy so we check again later on
         * and retry if the fs has been frozen after the page lock has
         * been acquired
         */
        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

        ret = gfs2_rs_alloc(ip);
        if (ret)
                return ret;

        atomic_set(&ip->i_res->rs_sizehint,
                   PAGE_CACHE_SIZE / sdp->sd_sb.sb_bsize);

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        ret = gfs2_glock_nq(&gh);
        if (ret)
                goto out;

        set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
        set_bit(GIF_SW_PAGED, &ip->i_flags);

        if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
                lock_page(page);
                if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
                        ret = -EAGAIN;
                        unlock_page(page);
                }
                goto out_unlock;
        }

        ret = gfs2_rindex_update(sdp);
        if (ret)
                goto out_unlock;

        ret = gfs2_quota_lock_check(ip);
        if (ret)
                goto out_unlock;
        gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
        ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
        if (ret)
                goto out_quota_unlock;

        rblocks = RES_DINODE + ind_blocks;
        if (gfs2_is_jdata(ip))
                rblocks += data_blocks ? data_blocks : 1;
        if (ind_blocks || data_blocks) {
                rblocks += RES_STATFS + RES_QUOTA;
                rblocks += gfs2_rg_blocks(ip);
        }
        ret = gfs2_trans_begin(sdp, rblocks, 0);
        if (ret)
                goto out_trans_fail;

        lock_page(page);
        ret = -EINVAL;
        size = i_size_read(inode);
        last_index = (size - 1) >> PAGE_CACHE_SHIFT;
        /* Check page index against inode size */
        if (size == 0 || (page->index > last_index))
                goto out_trans_end;

        ret = -EAGAIN;
        /* If truncated, we must retry the operation, we may have raced
         * with the glock demotion code.
         */
        if (!PageUptodate(page) || page->mapping != inode->i_mapping)
                goto out_trans_end;

        /* Unstuff, if required, and allocate backing blocks for page */
        ret = 0;
        if (gfs2_is_stuffed(ip))
                ret = gfs2_unstuff_dinode(ip, page);
        if (ret == 0)
                ret = gfs2_allocate_page_backing(page);

out_trans_end:
        if (ret)
                unlock_page(page);
        gfs2_trans_end(sdp);
out_trans_fail:
        gfs2_inplace_release(ip);
out_quota_unlock:
        gfs2_quota_unlock(ip);
out_unlock:
        gfs2_glock_dq(&gh);
out:
        gfs2_holder_uninit(&gh);
        if (ret == 0) {
                set_page_dirty(page);
                /* This check must be post dropping of transaction lock */
                if (inode->i_sb->s_frozen == SB_UNFROZEN) {
                        wait_on_page_writeback(page);
                } else {
                        ret = -EAGAIN;
                        unlock_page(page);
                }
        }
        return block_page_mkwrite_return(ret);
}

static const struct vm_operations_struct gfs2_vm_ops = {
        .fault = filemap_fault,
        .page_mkwrite = gfs2_page_mkwrite,
};

/**
 * gfs2_mmap -
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

        if (!(file->f_flags & O_NOATIME) &&
            !IS_NOATIME(&ip->i_inode)) {
                struct gfs2_holder i_gh;
                int error;

                gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
                error = gfs2_glock_nq(&i_gh);
                if (error == 0) {
                        file_accessed(file);
                        gfs2_glock_dq(&i_gh);
                }
                gfs2_holder_uninit(&i_gh);
                if (error)
                        return error;
        }
        vma->vm_ops = &gfs2_vm_ops;
        vma->vm_flags |= VM_CAN_NONLINEAR;

        return 0;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder i_gh;
        struct gfs2_file *fp;
        int error;

        fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
        if (!fp)
                return -ENOMEM;

        mutex_init(&fp->f_fl_mutex);

        gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
        file->private_data = fp;

        if (S_ISREG(ip->i_inode.i_mode)) {
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
                                           &i_gh);
                if (error)
                        goto fail;

                if (!(file->f_flags & O_LARGEFILE) &&
                    i_size_read(inode) > MAX_NON_LFS) {
                        error = -EOVERFLOW;
                        goto fail_gunlock;
                }

                gfs2_glock_dq_uninit(&i_gh);
        }

        return 0;

fail_gunlock:
        gfs2_glock_dq_uninit(&i_gh);
fail:
        file->private_data = NULL;
        kfree(fp);
        return error;
}

/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
        struct gfs2_inode *ip = GFS2_I(inode);

        kfree(file->private_data);
        file->private_data = NULL;

        if ((file->f_mode & FMODE_WRITE) &&
            (atomic_read(&inode->i_writecount) == 1))
                gfs2_rs_delete(ip);

        return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
                      int datasync)
{
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
        struct gfs2_inode *ip = GFS2_I(inode);
        int ret = 0, ret1 = 0;

        if (mapping->nrpages) {
                ret1 = filemap_fdatawrite_range(mapping, start, end);
                if (ret1 == -EIO)
                        return ret1;
        }

        if (datasync)
                sync_state &= ~I_DIRTY_SYNC;

        if (sync_state) {
                ret = sync_inode_metadata(inode, 1);
                if (ret)
                        return ret;
                if (gfs2_is_jdata(ip))
                        filemap_write_and_wait(mapping);
                gfs2_ail_flush(ip->i_gl, 1);
        }

        if (mapping->nrpages)
                ret = filemap_fdatawait_range(mapping, start, end);

        return ret ? ret : ret1;
}

/**
 * gfs2_file_aio_write - Perform a write to a file
 * @iocb: The io context
 * @iov: The data to write
 * @nr_segs: Number of @iov segments
 * @pos: The file position
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                                   unsigned long nr_segs, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        size_t writesize = iov_length(iov, nr_segs);
        struct dentry *dentry = file->f_dentry;
        struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
        struct gfs2_sbd *sdp;
        int ret;

        sdp = GFS2_SB(file->f_mapping->host);
        ret = gfs2_rs_alloc(ip);
        if (ret)
                return ret;

        atomic_set(&ip->i_res->rs_sizehint, writesize / sdp->sd_sb.sb_bsize);
        if (file->f_flags & O_APPEND) {
                struct gfs2_holder gh;

                ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
                if (ret)
                        return ret;
                gfs2_glock_dq_uninit(&gh);
        }

        return generic_file_aio_write(iocb, iov, nr_segs, pos);
}

static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
                           int mode)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct buffer_head *dibh;
        int error;
        loff_t size = len;
        unsigned int nr_blks;
        sector_t lblock = offset >> inode->i_blkbits;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (unlikely(error))
                return error;

        gfs2_trans_add_bh(ip->i_gl, dibh, 1);

        if (gfs2_is_stuffed(ip)) {
                error = gfs2_unstuff_dinode(ip, NULL);
                if (unlikely(error))
                        goto out;
        }

        while (len) {
                struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
                bh_map.b_size = len;
                set_buffer_zeronew(&bh_map);

                error = gfs2_block_map(inode, lblock, &bh_map, 1);
                if (unlikely(error))
                        goto out;
                len -= bh_map.b_size;
                nr_blks = bh_map.b_size >> inode->i_blkbits;
                lblock += nr_blks;
                if (!buffer_new(&bh_map))
                        continue;
                if (unlikely(!buffer_zeronew(&bh_map))) {
                        error = -EIO;
                        goto out;
                }
        }
        if (offset + size > inode->i_size && !(mode & FALLOC_FL_KEEP_SIZE))
                i_size_write(inode, offset + size);

        mark_inode_dirty(inode);

out:
        brelse(dibh);
        return error;
}

static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
                            unsigned int *data_blocks, unsigned int *ind_blocks)
{
        const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        unsigned int max_blocks = ip->i_rgd->rd_free_clone;
        unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

        for (tmp = max_data; tmp > sdp->sd_diptrs;) {
                tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
                max_data -= tmp;
        }
        /* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
           so it might end up with fewer data blocks */
        if (max_data <= *data_blocks)
                return;
        *data_blocks = max_data;
        *ind_blocks = max_blocks - max_data;
        *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
        if (*len > max) {
                *len = max;
                gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
        }
}
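
/*
 * Worked example (assumed geometry, for illustration): with 4KiB blocks
 * an indirect block holds roughly sd_inptrs = 509 pointers and the
 * dinode roughly sd_diptrs = 483. Starting from max_data = 10000 free
 * blocks, the loop above subtracts DIV_ROUND_UP(10000, 509) = 20 blocks
 * for the first layer of metadata, leaving max_data = 9980; since
 * tmp = 20 <= sd_diptrs the loop then stops, and the remaining
 * max_blocks - max_data blocks stay earmarked for indirection.
 */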

static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
                           loff_t len)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *ip = GFS2_I(inode);
        unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
        loff_t bytes, max_bytes;
        int error;
        const loff_t pos = offset;
        const loff_t count = len;
        loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
        loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
        loff_t max_chunk_size = UINT_MAX & bsize_mask;
        next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

        /* We only support the FALLOC_FL_KEEP_SIZE mode */
        if (mode & ~FALLOC_FL_KEEP_SIZE)
                return -EOPNOTSUPP;

        offset &= bsize_mask;

        len = next - offset;
        bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
        if (!bytes)
                bytes = UINT_MAX;
        bytes &= bsize_mask;
        if (bytes == 0)
                bytes = sdp->sd_sb.sb_bsize;

        error = gfs2_rs_alloc(ip);
        if (error)
                return error;

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
        error = gfs2_glock_nq(&ip->i_gh);
        if (unlikely(error))
                goto out_uninit;

        atomic_set(&ip->i_res->rs_sizehint, len / sdp->sd_sb.sb_bsize);

        while (len > 0) {
                if (len < bytes)
                        bytes = len;
                if (!gfs2_write_alloc_required(ip, offset, bytes)) {
                        len -= bytes;
                        offset += bytes;
                        continue;
                }
                error = gfs2_quota_lock_check(ip);
                if (error)
                        goto out_unlock;

retry:
                gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);

                error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
                if (error) {
                        if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
                                bytes >>= 1;
                                bytes &= bsize_mask;
                                if (bytes == 0)
                                        bytes = sdp->sd_sb.sb_bsize;
                                goto retry;
                        }
                        goto out_qunlock;
                }
                max_bytes = bytes;
                calc_max_reserv(ip, (len > max_chunk_size)? max_chunk_size : len,
                                &max_bytes, &data_blocks, &ind_blocks);

                rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
                          RES_RG_HDR + gfs2_rg_blocks(ip);
                if (gfs2_is_jdata(ip))
                        rblocks += data_blocks ? data_blocks : 1;

                error = gfs2_trans_begin(sdp, rblocks,
                                         PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
                if (error)
                        goto out_trans_fail;

                error = fallocate_chunk(inode, offset, max_bytes, mode);
                gfs2_trans_end(sdp);

                if (error)
                        goto out_trans_fail;

                len -= max_bytes;
                offset += max_bytes;
                gfs2_inplace_release(ip);
                gfs2_quota_unlock(ip);
        }

        if (error == 0)
                error = generic_write_sync(file, pos, count);
        goto out_unlock;

out_trans_fail:
        gfs2_inplace_release(ip);
out_qunlock:
        gfs2_quota_unlock(ip);
out_unlock:
        gfs2_glock_dq(&ip->i_gh);
out_uninit:
        gfs2_holder_uninit(&ip->i_gh);
        return error;
}
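
/*
 * Sizing note (illustrative): with a 4KiB block size, bsize_mask is
 * ~(loff_t)4095, so max_chunk_size = UINT_MAX & bsize_mask is just
 * under 4GiB; each pass of the loop above preallocates at most that
 * much, and on -ENOSPC `bytes` is halved (but never reduced below one
 * block) before retrying the reservation.
 */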

#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_setlease - acquire/release a file lease
 * @file: the file pointer
 * @arg: lease type
 * @fl: file lock
 *
 * We don't currently have a way to enforce a lease across the whole
 * cluster; until we do, disable leases (by just returning -EINVAL),
 * unless the administrator has requested purely local locking.
 *
 * Locking: called under lock_flocks
 *
 * Returns: errno
 */

static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
{
        return -EINVAL;
}

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
        struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;

        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
        if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
                return -ENOLCK;

        if (cmd == F_CANCELLK) {
                /* Hack: */
                cmd = F_SETLK;
                fl->fl_type = F_UNLCK;
        }
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;
        if (IS_GETLK(cmd))
                return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
        else if (fl->fl_type == F_UNLCK)
                return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
        else
                return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}

static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
        struct gfs2_file *fp = file->private_data;
        struct gfs2_holder *fl_gh = &fp->f_fl_gh;
        struct gfs2_inode *ip = GFS2_I(file->f_path.dentry->d_inode);
        struct gfs2_glock *gl;
        unsigned int state;
        int flags;
        int error = 0;

        state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
        flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;

        mutex_lock(&fp->f_fl_mutex);

        gl = fl_gh->gh_gl;
        if (gl) {
                if (fl_gh->gh_state == state)
                        goto out;
                flock_lock_file_wait(file,
                                     &(struct file_lock){.fl_type = F_UNLCK});
                gfs2_glock_dq_wait(fl_gh);
                gfs2_holder_reinit(state, flags, fl_gh);
        } else {
                error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
                                       &gfs2_flock_glops, CREATE, &gl);
                if (error)
                        goto out;
                gfs2_holder_init(gl, state, flags, fl_gh);
                gfs2_glock_put(gl);
        }
        error = gfs2_glock_nq(fl_gh);
        if (error) {
                gfs2_holder_uninit(fl_gh);
                if (error == GLR_TRYFAILED)
                        error = -EAGAIN;
        } else {
                error = flock_lock_file_wait(file, fl);
                gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
        }

out:
        mutex_unlock(&fp->f_fl_mutex);
        return error;
}

static void do_unflock(struct file *file, struct file_lock *fl)
{
        struct gfs2_file *fp = file->private_data;
        struct gfs2_holder *fl_gh = &fp->f_fl_gh;

        mutex_lock(&fp->f_fl_mutex);
        flock_lock_file_wait(file, fl);
        if (fl_gh->gh_gl) {
                gfs2_glock_dq_wait(fl_gh);
                gfs2_holder_uninit(fl_gh);
        }
        mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;
        if (fl->fl_type & LOCK_MAND)
                return -EOPNOTSUPP;

        if (fl->fl_type == F_UNLCK) {
                do_unflock(file, fl);
                return 0;
        } else {
                return do_flock(file, cmd, fl);
        }
}
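
/*
 * Illustrative user-space view: a call such as
 *
 *      int fd = open("/mnt/gfs2/file", O_RDWR);
 *      flock(fd, LOCK_EX);     // exclusive across the whole cluster
 *
 * arrives here with FL_FLOCK set, and do_flock() backs the lock with a
 * cluster-wide flock glock: LM_ST_EXCLUSIVE for LOCK_EX, LM_ST_SHARED
 * for LOCK_SH, so the lock is honoured on every node.
 */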

const struct file_operations gfs2_file_fops = {
        .llseek         = gfs2_llseek,
        .read           = do_sync_read,
        .aio_read       = generic_file_aio_read,
        .write          = do_sync_write,
        .aio_write      = gfs2_file_aio_write,
        .unlocked_ioctl = gfs2_ioctl,
        .mmap           = gfs2_mmap,
        .open           = gfs2_open,
        .release        = gfs2_release,
        .fsync          = gfs2_fsync,
        .lock           = gfs2_lock,
        .flock          = gfs2_flock,
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
        .setlease       = gfs2_setlease,
        .fallocate      = gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
        .readdir        = gfs2_readdir,
        .unlocked_ioctl = gfs2_ioctl,
        .open           = gfs2_open,
        .release        = gfs2_release,
        .fsync          = gfs2_fsync,
        .lock           = gfs2_lock,
        .flock          = gfs2_flock,
        .llseek         = default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

const struct file_operations gfs2_file_fops_nolock = {
        .llseek         = gfs2_llseek,
        .read           = do_sync_read,
        .aio_read       = generic_file_aio_read,
        .write          = do_sync_write,
        .aio_write      = gfs2_file_aio_write,
        .unlocked_ioctl = gfs2_ioctl,
        .mmap           = gfs2_mmap,
        .open           = gfs2_open,
        .release        = gfs2_release,
        .fsync          = gfs2_fsync,
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
        .setlease       = generic_setlease,
        .fallocate      = gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
        .readdir        = gfs2_readdir,
        .unlocked_ioctl = gfs2_ioctl,
        .open           = gfs2_open,
        .release        = gfs2_release,
        .fsync          = gfs2_fsync,
        .llseek         = default_llseek,
};