/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * file.c
 *
 * File open, close, extend, truncate
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "sysfile.h"
#include "inode.h"
#include "ioctl.h"
#include "journal.h"
#include "locks.h"
#include "mmap.h"
#include "suballoc.h"
#include "super.h"
#include "xattr.h"
#include "acl.h"
#include "quota.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"
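
/*
 * Per-file-descriptor state: each open allocates an ocfs2_file_private,
 * initializes its mutex and cluster file-lock resource, and hangs it off
 * of file->private_data for the file locking code to use.
 */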
static int ocfs2_init_file_private(struct inode *inode, struct file *file)
{
	struct ocfs2_file_private *fp;

	fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->fp_mutex);
	ocfs2_file_lock_res_init(&fp->fp_flock, fp);
	file->private_data = fp;

	return 0;
}

static void ocfs2_free_file_private(struct inode *inode, struct file *file)
{
	struct ocfs2_file_private *fp = file->private_data;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (fp) {
		ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
		ocfs2_lock_res_free(&fp->fp_flock);
		kfree(fp);
	}
	file->private_data = NULL;
}
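
/*
 * Open bumps ip_open_count under ip_lock so a racing wipe from another
 * node is detected, and flags O_DIRECT openers so later writes know
 * direct I/O may be in flight on this inode.
 */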
static int ocfs2_file_open(struct inode *inode, struct file *file)
{
	int status;
	int mode = file->f_flags;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	trace_ocfs2_file_open(inode, file, file->f_path.dentry,
			      (unsigned long long)OCFS2_I(inode)->ip_blkno,
			      file->f_path.dentry->d_name.len,
			      file->f_path.dentry->d_name.name, mode);

	if (file->f_mode & FMODE_WRITE)
		dquot_initialize(inode);

	spin_lock(&oi->ip_lock);

	/* Check that the inode hasn't been wiped from disk by another
	 * node. If it hasn't then we're safe as long as we hold the
	 * spin lock until our increment of open count. */
	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
		spin_unlock(&oi->ip_lock);

		status = -ENOENT;
		goto leave;
	}

	if (mode & O_DIRECT)
		oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;

	oi->ip_open_count++;
	spin_unlock(&oi->ip_lock);

	status = ocfs2_init_file_private(inode, file);
	if (status) {
		/*
		 * We want to set open count back if we're failing the
		 * open.
		 */
		spin_lock(&oi->ip_lock);
		oi->ip_open_count--;
		spin_unlock(&oi->ip_lock);
	}

leave:
	return status;
}

static int ocfs2_file_release(struct inode *inode, struct file *file)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	spin_lock(&oi->ip_lock);
	if (!--oi->ip_open_count)
		oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;

	trace_ocfs2_file_release(inode, file, file->f_path.dentry,
				 oi->ip_blkno,
				 file->f_path.dentry->d_name.len,
				 file->f_path.dentry->d_name.name,
				 oi->ip_open_count);
	spin_unlock(&oi->ip_lock);

	ocfs2_free_file_private(inode, file);

	return 0;
}

static int ocfs2_dir_open(struct inode *inode, struct file *file)
{
	return ocfs2_init_file_private(inode, file);
}

static int ocfs2_dir_release(struct inode *inode, struct file *file)
{
	ocfs2_free_file_private(inode, file);
	return 0;
}
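
/*
 * fsync/fdatasync: a pure datasync with no dirty non-data state only
 * needs the drive cache flushed (when barriers are enabled); anything
 * else forces a commit of the journal.
 */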
static int ocfs2_sync_file(struct file *file, int datasync)
{
	int err = 0;
	journal_t *journal;
	struct inode *inode = file->f_mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
			      OCFS2_I(inode)->ip_blkno,
			      file->f_path.dentry->d_name.len,
			      file->f_path.dentry->d_name.name,
			      (unsigned long long)datasync);

	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) {
		/*
		 * We still have to flush drive's caches to get data to the
		 * platter.
		 */
		if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
			blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
		goto bail;
	}

	journal = osb->journal->j_journal;
	err = jbd2_journal_force_commit(journal);

bail:
	if (err)
		mlog_errno(err);

	return (err < 0) ? -EIO : 0;
}
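
/*
 * Decide whether an atime update (and hence a disk transaction) is
 * warranted, honoring the noatime/nodiratime/relatime mount flags and
 * the atime_quantum mount option.
 */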
int ocfs2_should_update_atime(struct inode *inode,
			      struct vfsmount *vfsmnt)
{
	struct timespec now;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return 0;

	if ((inode->i_flags & S_NOATIME) ||
	    ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	/*
	 * We can be called with no vfsmnt structure - NFSD will
	 * sometimes do this.
	 *
	 * Note that our action here is different than touch_atime() -
	 * if we can't tell whether this is a noatime mount, then we
	 * don't know whether to trust the value of s_atime_quantum.
	 */
	if (vfsmnt == NULL)
		return 0;

	if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
	    ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	if (vfsmnt->mnt_flags & MNT_RELATIME) {
		if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
		    (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
			return 1;

		return 0;
	}

	now = CURRENT_TIME;
	if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
		return 0;
	else
		return 1;
}

int ocfs2_update_inode_atime(struct inode *inode,
			     struct buffer_head *bh)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Don't use ocfs2_mark_inode_dirty() here as we don't always
	 * have i_mutex to guard against concurrent changes to other
	 * inode fields.
	 */
	inode->i_atime = CURRENT_TIME;
	di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ocfs2_journal_dirty(handle, bh);

out_commit:
	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
	return ret;
}
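
/*
 * Update the in-core size, block count and times, then push the change
 * into the dinode. Callers must already have a transaction started.
 */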
static int ocfs2_set_inode_size(handle_t *handle,
				struct inode *inode,
				struct buffer_head *fe_bh,
				u64 new_i_size)
{
	int status;

	i_size_write(inode, new_i_size);
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

bail:
	return status;
}

int ocfs2_simple_size_update(struct inode *inode,
			     struct buffer_head *di_bh,
			     u64 new_i_size)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_set_inode_size(handle, inode, di_bh,
				   new_i_size);
	if (ret < 0)
		mlog_errno(ret);

	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}

static int ocfs2_cow_file_pos(struct inode *inode,
			      struct buffer_head *fe_bh,
			      u64 offset)
{
	int status = 0;
	u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;

	/*
	 * If the new offset is aligned to the range of the cluster, there is
	 * no space for ocfs2_zero_range_for_truncate to fill, so no need to
	 * CoW either.
	 */
	if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
		goto out;

	status = ocfs2_get_clusters(inode, cpos, &phys,
				    &num_clusters, &ext_flags);
	if (status) {
		mlog_errno(status);
		goto out;
	}

	if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
		goto out;

	return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos + 1);

out:
	return status;
}

static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
				     struct inode *inode,
				     struct buffer_head *fe_bh,
				     u64 new_i_size)
{
	int status;
	handle_t *handle;
	struct ocfs2_dinode *di;
	u64 cluster_bytes;

	/*
	 * We need to CoW the cluster contains the offset if it is reflinked
	 * since we will call ocfs2_zero_range_for_truncate later which will
	 * write "0" from offset to the end of the cluster.
	 */
	status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
	if (status) {
		mlog_errno(status);
		return status;
	}

	/* TODO: This needs to actually orphan the inode in this
	 * transaction. */

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto out_commit;
	}

	/*
	 * Do this before setting i_size.
	 */
	cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
	status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
					       cluster_bytes);
	if (status) {
		mlog_errno(status);
		goto out_commit;
	}

	i_size_write(inode, new_i_size);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	di = (struct ocfs2_dinode *) fe_bh->b_data;
	di->i_size = cpu_to_le64(new_i_size);
	di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);

	ocfs2_journal_dirty(handle, fe_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	return status;
}
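
/*
 * Truncate an inode down to new_i_size. The simple cases (no size
 * change, inline data) are handled before any more cluster locking;
 * a real allocation change orphans the inode first so that recovery
 * can complete the truncate after a crash.
 */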
static int ocfs2_truncate_file(struct inode *inode,
			       struct buffer_head *di_bh,
			       u64 new_i_size)
{
	int status = 0;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/* We trust di_bh because it comes from ocfs2_inode_lock(), which
	 * already validated it */
	fe = (struct ocfs2_dinode *) di_bh->b_data;

	trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
				  (unsigned long long)le64_to_cpu(fe->i_size),
				  (unsigned long long)new_i_size);

	mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
			"Inode %llu, inode i_size = %lld != di "
			"i_size = %llu, i_flags = 0x%x\n",
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			i_size_read(inode),
			(unsigned long long)le64_to_cpu(fe->i_size),
			le32_to_cpu(fe->i_flags));

	if (new_i_size > le64_to_cpu(fe->i_size)) {
		trace_ocfs2_truncate_file_error(
			(unsigned long long)le64_to_cpu(fe->i_size),
			(unsigned long long)new_i_size);
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	/* lets handle the simple truncate cases before doing any more
	 * cluster locking. */
	if (new_i_size == le64_to_cpu(fe->i_size))
		goto bail;

	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ocfs2_resv_discard(&osb->osb_la_resmap,
			   &OCFS2_I(inode)->ip_la_data_resv);

	/*
	 * The inode lock forced other nodes to sync and drop their
	 * pages, which (correctly) happens even if we have a truncate
	 * without allocation change - ocfs2 cluster sizes can be much
	 * greater than page size, so we have to truncate them
	 * anyway.
	 */
	unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(inode->i_mapping, new_i_size);

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
					       i_size_read(inode), 1);
		if (status)
			mlog_errno(status);

		goto bail_unlock_sem;
	}

	/* alright, we're going to need to do a full blown alloc size
	 * change. Orphan the inode so that recovery can complete the
	 * truncate if necessary. This does the task of marking
	 * i_size. */
	status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_sem;
	}

	status = ocfs2_commit_truncate(osb, inode, di_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_sem;
	}

	/* TODO: orphan dir cleanup here. */
bail_unlock_sem:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);

bail:
	if (!status && OCFS2_I(inode)->ip_clusters == 0)
		status = ocfs2_try_remove_refcount_tree(inode, di_bh);

	return status;
}

/*
 * extend file allocation only here.
 * we'll update all the disk stuff, and oip->alloc_size
 *
 * expect stuff to be locked, a transaction started and enough data /
 * metadata reservations in the contexts.
 *
 * Will return -EAGAIN, and a reason if a restart is needed.
 * If passed in, *reason will always be set, even in error.
 */
int ocfs2_add_inode_data(struct ocfs2_super *osb,
			 struct inode *inode,
			 u32 *logical_offset,
			 u32 clusters_to_add,
			 int mark_unwritten,
			 struct buffer_head *fe_bh,
			 handle_t *handle,
			 struct ocfs2_alloc_context *data_ac,
			 struct ocfs2_alloc_context *meta_ac,
			 enum ocfs2_alloc_restarted *reason_ret)
{
	int ret;
	struct ocfs2_extent_tree et;

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
	ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
					  clusters_to_add, mark_unwritten,
					  data_ac, meta_ac, reason_ret);

	return ret;
}
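
/*
 * Core allocation loop for extends: reserves quota and journal
 * credits, adds clusters to the inode's btree, and restarts either the
 * transaction or the whole function when the allocator asks for it via
 * enum ocfs2_alloc_restarted.
 */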
static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
				     u32 clusters_to_add, int mark_unwritten)
{
	int status = 0;
	int restart_func = 0;
	int credits;
	u32 prev_clusters;
	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *fe = NULL;
	handle_t *handle = NULL;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	enum ocfs2_alloc_restarted why;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_extent_tree et;
	int did_quota = 0;

	/*
	 * This function only exists for file systems which don't
	 * support holes.
	 */
	BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));

	status = ocfs2_read_inode_block(inode, &bh);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

restart_all:
	BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
	status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
				       &data_ac, &meta_ac);
	if (status) {
		mlog_errno(status);
		goto leave;
	}

	credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list,
					    clusters_to_add);
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto leave;
	}

restarted_transaction:
	trace_ocfs2_extend_allocation(
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		(unsigned long long)i_size_read(inode),
		le32_to_cpu(fe->i_clusters), clusters_to_add,
		why, restart_func);

	status = dquot_alloc_space_nodirty(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
	if (status)
		goto leave;
	did_quota = 1;

	/* reserve a write to the file entry early on - that we if we
	 * run out of credits in the allocation path, we can still
	 * update i_size. */
	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	prev_clusters = OCFS2_I(inode)->ip_clusters;

	status = ocfs2_add_inode_data(osb,
				      inode,
				      &logical_start,
				      clusters_to_add,
				      mark_unwritten,
				      bh,
				      handle,
				      data_ac,
				      meta_ac,
				      &why);
	if ((status < 0) && (status != -EAGAIN)) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto leave;
	}

	ocfs2_journal_dirty(handle, bh);

	spin_lock(&OCFS2_I(inode)->ip_lock);
	clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
	spin_unlock(&OCFS2_I(inode)->ip_lock);
	/* Release unused quota reservation */
	dquot_free_space(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
	did_quota = 0;

	if (why != RESTART_NONE && clusters_to_add) {
		if (why == RESTART_META) {
			restart_func = 1;
		} else {
			BUG_ON(why != RESTART_TRANS);

			/* TODO: This can be more intelligent. */
			credits = ocfs2_calc_extend_credits(osb->sb,
							    &fe->id2.i_list,
							    clusters_to_add);
			status = ocfs2_extend_trans(handle, credits);
			if (status < 0) {
				/* handle still has to be committed at
				 * this point. */
				status = -ENOMEM;
				mlog_errno(status);
				goto leave;
			}
			goto restarted_transaction;
		}
	}

	trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
	     le32_to_cpu(fe->i_clusters),
	     (unsigned long long)le64_to_cpu(fe->i_size),
	     OCFS2_I(inode)->ip_clusters,
	     (unsigned long long)i_size_read(inode));

leave:
	if (status < 0 && did_quota)
		dquot_free_space(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
	if (handle) {
		ocfs2_commit_trans(osb, handle);
		handle = NULL;
	}
	if (data_ac) {
		ocfs2_free_alloc_context(data_ac);
		data_ac = NULL;
	}
	if (meta_ac) {
		ocfs2_free_alloc_context(meta_ac);
		meta_ac = NULL;
	}
	if ((!status) && restart_func) {
		restart_func = 0;
		goto restart_all;
	}
	brelse(bh);
	bh = NULL;

	return status;
}

/*
 * While a write will already be ordering the data, a truncate will not.
 * Thus, we need to explicitly order the zeroed pages.
 */
static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;
	int ret = 0;

	if (!ocfs2_should_order_data(inode))
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_jbd2_file_inode(handle, inode);
	if (ret < 0)
		mlog_errno(ret);

out:
	if (ret) {
		if (!IS_ERR(handle))
			ocfs2_commit_trans(osb, handle);
		handle = ERR_PTR(ret);
	}
	return handle;
}

/* Some parts of this taken from generic_cont_expand, which turned out
 * to be too fragile to do exactly what we need without us having to
 * worry about recursive locking in ->write_begin() and ->write_end(). */
static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
				 u64 abs_to)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	unsigned long index = abs_from >> PAGE_CACHE_SHIFT;
	handle_t *handle = NULL;
	int ret = 0;
	unsigned zero_from, zero_to, block_start, block_end;

	BUG_ON(abs_from >= abs_to);
	BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
	BUG_ON(abs_from & (inode->i_blkbits - 1));

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	/* Get the offsets within the page that we want to zero */
	zero_from = abs_from & (PAGE_CACHE_SIZE - 1);
	zero_to = abs_to & (PAGE_CACHE_SIZE - 1);
	if (!zero_to)
		zero_to = PAGE_CACHE_SIZE;

	trace_ocfs2_write_zero_page(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)abs_from,
			(unsigned long long)abs_to,
			index, zero_from, zero_to);

	/* We know that zero_from is block aligned */
	for (block_start = zero_from; block_start < zero_to;
	     block_start = block_end) {
		block_end = block_start + (1 << inode->i_blkbits);

		/*
		 * block_start is block-aligned.  Bump it by one to force
		 * __block_write_begin and block_commit_write to zero the
		 * whole block.
		 */
		ret = __block_write_begin(page, block_start + 1, 0,
					  ocfs2_get_block);
		if (ret < 0) {
			mlog_errno(ret);
			goto out_unlock;
		}

		if (!handle) {
			handle = ocfs2_zero_start_ordered_transaction(inode);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				handle = NULL;
				break;
			}
		}

		/* must not update i_size! */
		ret = block_commit_write(page, block_start + 1,
					 block_start + 1);
		if (ret < 0)
			mlog_errno(ret);
		else
			ret = 0;
	}

	if (handle)
		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);

out_unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
}

/*
 * Find the next range to zero.  We do this in terms of bytes because
 * that's what ocfs2_zero_extend() wants, and it is dealing with the
 * pagecache.  We may return multiple extents.
 *
 * zero_start and zero_end are ocfs2_zero_extend()s current idea of what
 * needs to be zeroed.  range_start and range_end return the next zeroing
 * range.  A subsequent call should pass the previous range_end as its
 * zero_start.  If range_end is 0, there's nothing to do.
 *
 * Unwritten extents are skipped over.  Refcounted extents are CoWd.
 */
static int ocfs2_zero_extend_get_range(struct inode *inode,
				       struct buffer_head *di_bh,
				       u64 zero_start, u64 zero_end,
				       u64 *range_start, u64 *range_end)
{
	int rc = 0, needs_cow = 0;
	u32 p_cpos, zero_clusters = 0;
	u32 zero_cpos =
		zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;

	while (zero_cpos < last_cpos) {
		rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
					&num_clusters, &ext_flags);
		if (rc) {
			mlog_errno(rc);
			goto out;
		}

		if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
			zero_clusters = num_clusters;
			if (ext_flags & OCFS2_EXT_REFCOUNTED)
				needs_cow = 1;
			break;
		}

		zero_cpos += num_clusters;
	}
	if (!zero_clusters) {
		*range_end = 0;
		goto out;
	}

	while ((zero_cpos + zero_clusters) < last_cpos) {
		rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
					&p_cpos, &num_clusters,
					&ext_flags);
		if (rc) {
			mlog_errno(rc);
			goto out;
		}

		if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
			break;
		if (ext_flags & OCFS2_EXT_REFCOUNTED)
			needs_cow = 1;
		zero_clusters += num_clusters;
	}
	if ((zero_cpos + zero_clusters) > last_cpos)
		zero_clusters = last_cpos - zero_cpos;

	if (needs_cow) {
		rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos,
					zero_clusters, UINT_MAX);
		if (rc) {
			mlog_errno(rc);
			goto out;
		}
	}

	*range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
	*range_end = ocfs2_clusters_to_bytes(inode->i_sb,
					     zero_cpos + zero_clusters);

out:
	return rc;
}

/*
 * Zero one range returned from ocfs2_zero_extend_get_range().  The caller
 * has made sure that the entire range needs zeroing.
 */
static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
				   u64 range_end)
{
	int rc = 0;
	u64 next_pos;
	u64 zero_pos = range_start;

	trace_ocfs2_zero_extend_range(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)range_start,
			(unsigned long long)range_end);
	BUG_ON(range_start >= range_end);

	while (zero_pos < range_end) {
		next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
		if (next_pos > range_end)
			next_pos = range_end;
		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos);
		if (rc < 0) {
			mlog_errno(rc);
			break;
		}
		zero_pos = next_pos;

		/*
		 * Very large extends have the potential to lock up
		 * the cpu for extended periods of time.
		 */
		cond_resched();
	}

	return rc;
}

int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
		      loff_t zero_to_size)
{
	int ret = 0;
	u64 zero_start, range_start = 0, range_end = 0;
	struct super_block *sb = inode->i_sb;

	zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
	trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
				(unsigned long long)zero_start,
				(unsigned long long)i_size_read(inode));
	while (zero_start < zero_to_size) {
		ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
						  zero_to_size,
						  &range_start,
						  &range_end);
		if (ret) {
			mlog_errno(ret);
			break;
		}
		if (!range_end)
			break;
		/* Trim the ends */
		if (range_start < zero_start)
			range_start = zero_start;
		if (range_end > zero_to_size)
			range_end = zero_to_size;

		ret = ocfs2_zero_extend_range(inode, range_start,
					      range_end);
		if (ret) {
			mlog_errno(ret);
			break;
		}
		zero_start = range_end;
	}

	return ret;
}

int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
			  u64 new_i_size, u64 zero_to)
{
	int ret;
	u32 clusters_to_add;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	/*
	 * Only quota files call this without a bh, and they can't be
	 * refcounted.
	 */
	BUG_ON(!di_bh && (oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
	BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));

	clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
	if (clusters_to_add < oi->ip_clusters)
		clusters_to_add = 0;
	else
		clusters_to_add -= oi->ip_clusters;

	if (clusters_to_add) {
		ret = __ocfs2_extend_allocation(inode, oi->ip_clusters,
						clusters_to_add, 0);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Call this even if we don't add any clusters to the tree. We
	 * still need to zero the area between the old i_size and the
	 * new i_size.
	 */
	ret = ocfs2_zero_extend(inode, di_bh, zero_to);
	if (ret < 0)
		mlog_errno(ret);

out:
	return ret;
}
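
/*
 * Grow a file to new_i_size. Small inline-data extends stay inline
 * when they fit; otherwise the inode is converted to extents and the
 * new tail is either just zeroed (sparse file systems) or fully
 * allocated and zeroed (non-sparse).
 */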
static int ocfs2_extend_file(struct inode *inode,
			     struct buffer_head *di_bh,
			     u64 new_i_size)
{
	int ret = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	BUG_ON(!di_bh);

	/* setattr sometimes calls us like this. */
	if (new_i_size == 0)
		goto out;

	if (i_size_read(inode) == new_i_size)
		goto out;
	BUG_ON(new_i_size < i_size_read(inode));

	/*
	 * The alloc sem blocks people in read/write from reading our
	 * allocation until we're done changing it. We depend on
	 * i_mutex to block other extend/truncate calls while we're
	 * here.  We even have to hold it for sparse files because there
	 * might be some tail zeroing.
	 */
	down_write(&oi->ip_alloc_sem);

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		/*
		 * We can optimize small extends by keeping the inodes
		 * inline data.
		 */
		if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
			up_write(&oi->ip_alloc_sem);
			goto out_update_size;
		}

		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
		if (ret) {
			up_write(&oi->ip_alloc_sem);
			mlog_errno(ret);
			goto out;
		}
	}

	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
		ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
	else
		ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
					    new_i_size);

	up_write(&oi->ip_alloc_sem);

	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

out_update_size:
	ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
	if (ret < 0)
		mlog_errno(ret);

out:
	return ret;
}
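
/*
 * Note the lock ordering here: i_mutex is held by the VFS before we are
 * called, then the cluster rw lock is taken for size changes, then the
 * cluster inode lock. Quota structures are pre-acquired with dqget() so
 * their allocation never happens under dquot_transfer()'s locks.
 */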
int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
{
	int status = 0, size_change;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	struct buffer_head *bh = NULL;
	handle_t *handle = NULL;
	struct dquot *transfer_to[MAXQUOTAS] = { };
	int qtype;

	trace_ocfs2_setattr(inode, dentry,
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    dentry->d_name.len, dentry->d_name.name,
			    attr->ia_valid, attr->ia_mode,
			    attr->ia_uid, attr->ia_gid);

	/* ensuring we don't even attempt to truncate a symlink */
	if (S_ISLNK(inode->i_mode))
		attr->ia_valid &= ~ATTR_SIZE;

#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
			   | ATTR_GID | ATTR_UID | ATTR_MODE)
	if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
		return 0;

	status = inode_change_ok(inode, attr);
	if (status)
		return status;

	if (is_quota_modification(inode, attr))
		dquot_initialize(inode);
	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
	if (size_change) {
		status = ocfs2_rw_lock(inode, 1);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	status = ocfs2_inode_lock(inode, &bh, 1);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		goto bail_unlock_rw;
	}

	if (size_change && attr->ia_size != i_size_read(inode)) {
		status = inode_newsize_ok(inode, attr->ia_size);
		if (status)
			goto bail_unlock;

		inode_dio_wait(inode);

		if (i_size_read(inode) > attr->ia_size) {
			if (ocfs2_should_order_data(inode)) {
				status = ocfs2_begin_ordered_truncate(inode,
								      attr->ia_size);
				if (status)
					goto bail_unlock;
			}
			status = ocfs2_truncate_file(inode, bh, attr->ia_size);
		} else
			status = ocfs2_extend_file(inode, bh, attr->ia_size);
		if (status < 0) {
			if (status != -ENOSPC)
				mlog_errno(status);
			status = -ENOSPC;
			goto bail_unlock;
		}
	}

	if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
	    (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
		/*
		 * Gather pointers to quota structures so that allocation /
		 * freeing of quota structures happens here and not inside
		 * dquot_transfer() where we have problems with lock ordering
		 */
		if (attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid
		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
		    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
			transfer_to[USRQUOTA] = dqget(sb, attr->ia_uid,
						      USRQUOTA);
			if (!transfer_to[USRQUOTA]) {
				status = -ESRCH;
				goto bail_unlock;
			}
		}
		if (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid
		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
		    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
			transfer_to[GRPQUOTA] = dqget(sb, attr->ia_gid,
						      GRPQUOTA);
			if (!transfer_to[GRPQUOTA]) {
				status = -ESRCH;
				goto bail_unlock;
			}
		}
		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
					   2 * ocfs2_quota_trans_credits(sb));
		if (IS_ERR(handle)) {
			status = PTR_ERR(handle);
			mlog_errno(status);
			goto bail_unlock;
		}
		status = __dquot_transfer(inode, transfer_to);
		if (status < 0)
			goto bail_commit;
	} else {
		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
		if (IS_ERR(handle)) {
			status = PTR_ERR(handle);
			mlog_errno(status);
			goto bail_unlock;
		}
	}

	/*
	 * This will intentionally not wind up calling truncate_setsize(),
	 * since all the work for a size change has been done above.
	 * Otherwise, we could get into problems with truncate as
	 * ip_alloc_sem is used there to protect against i_size
	 * changes.
	 *
	 * XXX: this means the conditional below can probably be removed.
	 */
	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		status = vmtruncate(inode, attr->ia_size);
		if (status) {
			mlog_errno(status);
			goto bail_commit;
		}
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);

	status = ocfs2_mark_inode_dirty(handle, inode, bh);
	if (status < 0)
		mlog_errno(status);

bail_commit:
	ocfs2_commit_trans(osb, handle);
bail_unlock:
	ocfs2_inode_unlock(inode, 1);
bail_unlock_rw:
	if (size_change)
		ocfs2_rw_unlock(inode, 1);
bail:
	brelse(bh);

	/* Release quota pointers in case we acquired them */
	for (qtype = 0; qtype < MAXQUOTAS; qtype++)
		dqput(transfer_to[qtype]);

	if (!status && attr->ia_valid & ATTR_MODE) {
		status = ocfs2_acl_chmod(inode);
		if (status < 0)
			mlog_errno(status);
	}

	return status;
}

int ocfs2_getattr(struct vfsmount *mnt,
		  struct dentry *dentry,
		  struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = dentry->d_inode->i_sb;
	struct ocfs2_super *osb = sb->s_fs_info;
	int err;

	err = ocfs2_inode_revalidate(dentry);
	if (err) {
		if (err != -ENOENT)
			mlog_errno(err);
		goto bail;
	}

	generic_fillattr(inode, stat);

	/* We set the blksize from the cluster size for performance */
	stat->blksize = osb->s_clustersize;

bail:
	return err;
}

int ocfs2_permission(struct inode *inode, int mask)
{
	int ret;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	ret = ocfs2_inode_lock(inode, NULL, 0);
	if (ret) {
		if (ret != -ENOENT)
			mlog_errno(ret);
		goto out;
	}

	ret = generic_permission(inode, mask);

	ocfs2_inode_unlock(inode, 0);
out:
	return ret;
}

static int __ocfs2_write_remove_suid(struct inode *inode,
				     struct buffer_head *bh)
{
	int ret;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;

	trace_ocfs2_write_remove_suid(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			inode->i_mode);

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_trans;
	}

	inode->i_mode &= ~S_ISUID;
	if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
		inode->i_mode &= ~S_ISGID;

	di = (struct ocfs2_dinode *) bh->b_data;
	di->i_mode = cpu_to_le16(inode->i_mode);

	ocfs2_journal_dirty(handle, bh);

out_trans:
	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}

/*
 * Will look for holes and unwritten extents in the range starting at
 * pos for count bytes (inclusive).
 */
static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos,
				       size_t count)
{
	int ret = 0;
	unsigned int extent_flags;
	u32 cpos, clusters, extent_len, phys_cpos;
	struct super_block *sb = inode->i_sb;

	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;

	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
					 &extent_flags);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) {
			ret = 1;
			break;
		}

		if (extent_len > clusters)
			extent_len = clusters;

		clusters -= extent_len;
		cpos += extent_len;
	}
out:
	return ret;
}

static int ocfs2_write_remove_suid(struct inode *inode)
{
	int ret;
	struct buffer_head *bh = NULL;

	ret = ocfs2_read_inode_block(inode, &bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = __ocfs2_write_remove_suid(inode, bh);
out:
	brelse(bh);
	return ret;
}

/*
 * Allocate enough extents to cover the region starting at byte offset
 * start for len bytes.  Existing extents are skipped, any extents
 * added are marked as "unwritten".
 */
static int ocfs2_allocate_unwritten_extents(struct inode *inode,
					    u64 start, u64 len)
{
	int ret;
	u32 cpos, phys_cpos, clusters, alloc_size;
	u64 end = start + len;
	struct buffer_head *di_bh = NULL;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_read_inode_block(inode, &di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * Nothing to do if the requested reservation range
		 * fits within the inode.
		 */
		if (ocfs2_size_fits_inline_data(di_bh, end))
			goto out;

		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * We consider both start and len to be inclusive.
	 */
	cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
	clusters -= cpos;

	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
					 &alloc_size, NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * Hole or existing extent len can be arbitrary, so
		 * cap it to our own allocation request.
		 */
		if (alloc_size > clusters)
			alloc_size = clusters;

		if (phys_cpos) {
			/*
			 * We already have an allocation at this
			 * region so we can safely skip it.
			 */
			goto next;
		}

		ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
		if (ret) {
			if (ret != -ENOSPC)
				mlog_errno(ret);
			goto out;
		}

next:
		cpos += alloc_size;
		clusters -= alloc_size;
	}

	ret = 0;
out:
	brelse(di_bh);
	return ret;
}

/*
 * Truncate a byte range, avoiding pages within partial clusters. This
 * preserves those pages for the zeroing code to write to.
 */
static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
					 u64 byte_len)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	loff_t start, end;
	struct address_space *mapping = inode->i_mapping;

	start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
	end = byte_start + byte_len;
	end = end & ~(osb->s_clustersize - 1);

	if (start < end) {
		unmap_mapping_range(mapping, start, end - start, 0);
		truncate_inode_pages_range(mapping, start, end - 1);
	}
}
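
/*
 * Zero the user-visible partial-cluster edges of a hole punch. The
 * fully-covered middle clusters are removed from the btree; only the
 * first and last clusters of the range can contain bytes that must
 * read back as zero afterwards.
 */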
static int ocfs2_zero_partial_clusters(struct inode *inode,
				       u64 start, u64 len)
{
	int ret = 0;
	u64 tmpend, end = start + len;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	unsigned int csize = osb->s_clustersize;
	handle_t *handle;

	/*
	 * The "start" and "end" values are NOT necessarily part of
	 * the range whose allocation is being deleted. Rather, this
	 * is what the user passed in with the request. We must zero
	 * partial clusters here. There's no need to worry about
	 * physical allocation - the zeroing code knows to skip holes.
	 */
	trace_ocfs2_zero_partial_clusters(
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		(unsigned long long)start, (unsigned long long)end);

	/*
	 * If both edges are on a cluster boundary then there's no
	 * zeroing required as the region is part of the allocation to
	 * be truncated.
	 */
	if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	/*
	 * We want to get the byte offset of the end of the 1st cluster.
	 */
	tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
	if (tmpend > end)
		tmpend = end;

	trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
						 (unsigned long long)tmpend);

	ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
	if (ret)
		mlog_errno(ret);

	if (tmpend < end) {
		/*
		 * This may make start and end equal, but the zeroing
		 * code will skip any work in that case so there's no
		 * need to catch it up here.
		 */
		start = end & ~(osb->s_clustersize - 1);

		trace_ocfs2_zero_partial_clusters_range2(
			(unsigned long long)start, (unsigned long long)end);

		ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
		if (ret)
			mlog_errno(ret);
	}

	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}

static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
{
	int i;
	struct ocfs2_extent_rec *rec = NULL;

	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {

		rec = &el->l_recs[i];

		if (le32_to_cpu(rec->e_cpos) < pos)
			break;
	}

	return i;
}

/*
 * Helper to calculate the punching pos and length in one run, we handle the
 * following three cases in order:
 *
 * - remove the entire record
 * - remove a partial record
 * - no record needs to be removed (hole-punching completed)
 */
static void ocfs2_calc_trunc_pos(struct inode *inode,
				 struct ocfs2_extent_list *el,
				 struct ocfs2_extent_rec *rec,
				 u32 trunc_start, u32 *trunc_cpos,
				 u32 *trunc_len, u32 *trunc_end,
				 u64 *blkno, int *done)
{
	int ret = 0;
	u32 coff, range;

	range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);

	if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
		/*
		 * remove an entire extent record.
		 */
		*trunc_cpos = le32_to_cpu(rec->e_cpos);
		/*
		 * Skip holes if any.
		 */
		if (range < *trunc_end)
			*trunc_end = range;
		*trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
		*blkno = le64_to_cpu(rec->e_blkno);
		*trunc_end = le32_to_cpu(rec->e_cpos);
	} else if (range > trunc_start) {
		/*
		 * remove a partial extent record, which means we're
		 * removing the last extent record.
		 */
		*trunc_cpos = trunc_start;
		/*
		 * Skip holes if any.
		 */
		if (range < *trunc_end)
			*trunc_end = range;
		*trunc_len = *trunc_end - trunc_start;
		coff = trunc_start - le32_to_cpu(rec->e_cpos);
		*blkno = le64_to_cpu(rec->e_blkno) +
				ocfs2_clusters_to_blocks(inode->i_sb, coff);
		*trunc_end = trunc_start;
	} else {
		/*
		 * It may have two following possibilities:
		 *
		 * - last record has been removed
		 * - trunc_start was within a hole
		 *
		 * both two cases mean the completion of hole punching.
		 */
		ret = 1;
	}

	*done = ret;
}

static int ocfs2_remove_inode_range(struct inode *inode,
				    struct buffer_head *di_bh, u64 byte_start,
				    u64 byte_len)
{
	int ret = 0, flags = 0, done = 0, i;
	u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
	u32 cluster_in_el;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct address_space *mapping = inode->i_mapping;
	struct ocfs2_extent_tree et;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_list *el = NULL;
	struct ocfs2_extent_rec *rec = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
	ocfs2_init_dealloc_ctxt(&dealloc);

	trace_ocfs2_remove_inode_range(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)byte_start,
			(unsigned long long)byte_len);

	if (byte_len == 0)
		return 0;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
					    byte_start + byte_len, 0);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		/*
		 * There's no need to get fancy with the page cache
		 * truncate of an inline-data inode. We're talking
		 * about less than a page here, which will be cached
		 * in the dinode buffer anyway.
		 */
		unmap_mapping_range(mapping, 0, 0, 0);
		truncate_inode_pages(mapping, 0);
		goto out;
	}

	/*
	 * For reflinks, we may need to CoW 2 clusters which might be
	 * partially zero'd later, if hole's start and end offset were
	 * within one cluster(means is not exactly aligned to clustersize).
	 */
	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) {

		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
	trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
	cluster_in_el = trunc_end;

	ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	path = ocfs2_new_path_from_et(&et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	while (trunc_end > trunc_start) {

		ret = ocfs2_find_path(INODE_CACHE(inode), path,
				      cluster_in_el);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		el = path_leaf_el(path);

		i = ocfs2_find_rec(el, trunc_end);
		/*
		 * Need to go to previous extent block.
		 */
		if (i < 0) {
			if (path->p_tree_depth == 0)
				break;

			ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
							    path,
							    &cluster_in_el);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/*
			 * We've reached the leftmost extent block,
			 * it's safe to leave.
			 */
			if (cluster_in_el == 0)
				break;

			/*
			 * The 'pos' searched for previous extent block is
			 * always one cluster less than actual trunc_end.
			 */
			trunc_end = cluster_in_el + 1;

			ocfs2_reinit_path(path, 1);

			continue;

		} else
			rec = &el->l_recs[i];

		ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
				     &trunc_len, &trunc_end, &blkno, &done);
		if (done)
			break;

		flags = rec->e_flags;
		phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);

		ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
					       phys_cpos, trunc_len, flags,
					       &dealloc, refcount_loc);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		cluster_in_el = trunc_end;

		ocfs2_reinit_path(path, 1);
	}

	ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);

out:
	ocfs2_free_path(path);
	ocfs2_schedule_truncate_log_flush(osb, 1);
	ocfs2_run_deallocs(osb, &dealloc);

	return ret;
}

/*
 * Parts of this function taken from xfs_change_file_space()
 */
static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
				     loff_t f_pos, unsigned int cmd,
				     struct ocfs2_space_resv *sr,
				     int change_size)
{
	int ret;
	s64 llen;
	loff_t size;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *di_bh = NULL;
	handle_t *handle;
	unsigned long long max_off = inode->i_sb->s_maxbytes;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	mutex_lock(&inode->i_mutex);

	/*
	 * This prevents concurrent writes on other nodes
	 */
	ret = ocfs2_rw_lock(inode, 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out_rw_unlock;
	}

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
		ret = -EPERM;
		goto out_inode_unlock;
	}

	switch (sr->l_whence) {
	case 0: /*SEEK_SET*/
		break;
	case 1: /*SEEK_CUR*/
		sr->l_start += f_pos;
		break;
	case 2: /*SEEK_END*/
		sr->l_start += i_size_read(inode);
		break;
	default:
		ret = -EINVAL;
		goto out_inode_unlock;
	}
	sr->l_whence = 0;

	llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;

	if (sr->l_start < 0
	    || sr->l_start > max_off
	    || (sr->l_start + llen) < 0
	    || (sr->l_start + llen) > max_off) {
		ret = -EINVAL;
		goto out_inode_unlock;
	}
	size = sr->l_start + sr->l_len;

	if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) {
		if (sr->l_len <= 0) {
			ret = -EINVAL;
			goto out_inode_unlock;
		}
	}

	if (file && should_remove_suid(file->f_path.dentry)) {
		ret = __ocfs2_write_remove_suid(inode, di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out_inode_unlock;
		}
	}

	down_write(&OCFS2_I(inode)->ip_alloc_sem);
	switch (cmd) {
	case OCFS2_IOC_RESVSP:
	case OCFS2_IOC_RESVSP64:
		/*
		 * This takes unsigned offsets, but the signed ones we
		 * pass have been checked against overflow above.
		 */
		ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
						       sr->l_len);
		break;
	case OCFS2_IOC_UNRESVSP:
	case OCFS2_IOC_UNRESVSP64:
		ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
					       sr->l_len);
		break;
	default:
		ret = -EINVAL;
	}
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	if (ret) {
		mlog_errno(ret);
		goto out_inode_unlock;
	}

	/*
	 * We update c/mtime for these changes
	 */
	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_inode_unlock;
	}

	if (change_size && i_size_read(inode) < size)
		i_size_write(inode, size);

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
	if (ret < 0)
		mlog_errno(ret);

	ocfs2_commit_trans(osb, handle);

out_inode_unlock:
	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);
out_rw_unlock:
	ocfs2_rw_unlock(inode, 1);

out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}

int ocfs2_change_file_space(struct file *file, unsigned int cmd,
			    struct ocfs2_space_resv *sr)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
	    !ocfs2_writes_unwritten_extents(osb))
		return -ENOTTY;
	else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
		 !ocfs2_sparse_alloc(osb))
		return -ENOTTY;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;

	return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
}
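
/*
 * fallocate(2) entry point. FALLOC_FL_KEEP_SIZE maps to a size-
 * preserving reservation and FALLOC_FL_PUNCH_HOLE to UNRESVSP64,
 * reusing the same engine as the OCFS2_IOC_RESVSP64/UNRESVSP64
 * ioctls above.
 */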
static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
			    loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_space_resv sr;
	int change_size = 1;
	int cmd = OCFS2_IOC_RESVSP64;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;
	if (!ocfs2_writes_unwritten_extents(osb))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_KEEP_SIZE)
		change_size = 0;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		cmd = OCFS2_IOC_UNRESVSP64;

	sr.l_whence = 0;
	sr.l_start = (s64)offset;
	sr.l_len = (s64)len;

	return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
					 change_size);
}

int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
				   size_t count)
{
	int ret = 0;
	unsigned int extent_flags;
	u32 cpos, clusters, extent_len, phys_cpos;
	struct super_block *sb = inode->i_sb;

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
	    !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) ||
	    OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;

	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
					 &extent_flags);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
			ret = 1;
			break;
		}

		if (extent_len > clusters)
			extent_len = clusters;

		clusters -= extent_len;
		cpos += extent_len;
	}
out:
	return ret;
}

static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
					    struct file *file,
					    loff_t pos, size_t count,
					    int *meta_level)
{
	int ret;
	struct buffer_head *di_bh = NULL;
	u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	u32 clusters =
		ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	*meta_level = 1;

	ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX);
	if (ret)
		mlog_errno(ret);
out:
	brelse(di_bh);
	return ret;
}
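
/*
 * Called before every write to sort out cluster locking, suid/sgid
 * clearing, refcount CoW and whether the requested O_DIRECT write can
 * proceed; *direct_io is cleared when we must fall back to buffered.
 */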
static int ocfs2_prepare_inode_for_write(struct file *file,
					 loff_t *ppos,
					 size_t count,
					 int appending,
					 int *direct_io,
					 int *has_refcount)
{
	int ret = 0, meta_level = 0;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	loff_t saved_pos = 0, end;

	/*
	 * We start with a read level meta lock and only jump to an ex
	 * if we need to make modifications here.
	 */
	for(;;) {
		ret = ocfs2_inode_lock(inode, NULL, meta_level);
		if (ret < 0) {
			meta_level = -1;
			mlog_errno(ret);
			goto out;
		}

		/* Clear suid / sgid if necessary. We do this here
		 * instead of later in the write path because
		 * remove_suid() calls ->setattr without any hint that
		 * we may have already done our cluster locking. Since
		 * ocfs2_setattr() *must* take cluster locks to
		 * proceeed, this will lead us to recursively lock the
		 * inode. There's also the dinode i_size state which
		 * can be lost via setattr during extending writes (we
		 * set inode->i_size at the end of a write. */
		if (should_remove_suid(dentry)) {
			if (meta_level == 0) {
				ocfs2_inode_unlock(inode, meta_level);
				meta_level = 1;
				continue;
			}

			ret = ocfs2_write_remove_suid(inode);
			if (ret < 0) {
				mlog_errno(ret);
				goto out_unlock;
			}
		}

		/* work on a copy of ppos until we're sure that we won't have
		 * to recalculate it due to relocking. */
		if (appending)
			saved_pos = i_size_read(inode);
		else
			saved_pos = *ppos;

		end = saved_pos + count;

		ret = ocfs2_check_range_for_refcount(inode, saved_pos, count);
		if (ret == 1) {
			ocfs2_inode_unlock(inode, meta_level);
			meta_level = -1;

			ret = ocfs2_prepare_inode_for_refcount(inode,
							       file,
							       saved_pos,
							       count,
							       &meta_level);
			if (has_refcount)
				*has_refcount = 1;
			if (direct_io)
				*direct_io = 0;
		}

		if (ret < 0) {
			mlog_errno(ret);
			goto out_unlock;
		}

		/*
		 * Skip the O_DIRECT checks if we don't need
		 * them.
		 */
		if (!direct_io || !(*direct_io))
			break;

		/*
		 * There's no sane way to do direct writes to an inode
		 * with inline data.
		 */
		if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
			*direct_io = 0;
			break;
		}

		/*
		 * Allowing concurrent direct writes means
		 * i_size changes wouldn't be synchronized, so
		 * one node could wind up truncating another
		 * nodes writes.
		 */
		if (end > i_size_read(inode)) {
			*direct_io = 0;
			break;
		}

		/*
		 * We don't fill holes during direct io, so
		 * check for them here. If any are found, the
		 * caller will have to retake some cluster
		 * locks and initiate the io as buffered.
		 */
		ret = ocfs2_check_range_for_holes(inode, saved_pos, count);
		if (ret == 1) {
			*direct_io = 0;
			ret = 0;
		} else if (ret < 0)
			mlog_errno(ret);
		break;
	}

	if (appending)
		*ppos = saved_pos;

out_unlock:
	trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
					    saved_pos, appending, count,
					    direct_io, has_refcount);

	if (meta_level >= 0)
		ocfs2_inode_unlock(inode, meta_level);

out:
	return ret;
}
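
/*
 * Write path. The lock hierarchy is i_mutex -> rw cluster lock -> (for
 * O_DIRECT with full coherency) the cluster inode lock; when a direct
 * write must fall back to buffered I/O, everything below i_mutex is
 * dropped and retaken via the relock label.
 */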
static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs,
				    loff_t pos)
{
	int ret, direct_io, appending, rw_level, have_alloc_sem  = 0;
	int can_do_direct, has_refcount = 0;
	ssize_t written = 0;
	size_t ocount;		/* original count */
	size_t count;		/* after file limit checks */
	loff_t old_size, *ppos = &iocb->ki_pos;
	u32 old_clusters;
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int full_coherency = !(osb->s_mount_opt &
			       OCFS2_MOUNT_COHERENCY_BUFFERED);

	trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		file->f_path.dentry->d_name.len,
		file->f_path.dentry->d_name.name,
		(unsigned int)nr_segs);

	if (iocb->ki_left == 0)
		return 0;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	appending = file->f_flags & O_APPEND ? 1 : 0;
	direct_io = file->f_flags & O_DIRECT ? 1 : 0;

	mutex_lock(&inode->i_mutex);

	ocfs2_iocb_clear_sem_locked(iocb);

relock:
	/* to match setattr's i_mutex -> rw_lock ordering */
	if (direct_io) {
		have_alloc_sem = 1;
		atomic_inc(&inode->i_dio_count);

		/* communicate with ocfs2_dio_end_io */
		ocfs2_iocb_set_sem_locked(iocb);
	}

	/*
	 * Concurrent O_DIRECT writes are allowed with
	 * mount_option "coherency=buffered".
	 */
	rw_level = (!direct_io || full_coherency);

	ret = ocfs2_rw_lock(inode, rw_level);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_sems;
	}

	/*
	 * O_DIRECT writes with "coherency=full" need to take EX cluster
	 * inode_lock to guarantee coherency.
	 */
	if (direct_io && full_coherency) {
		/*
		 * We need to take and drop the inode lock to force
		 * other nodes to drop their caches.  Buffered I/O
		 * already does this in write_begin().
		 */
		ret = ocfs2_inode_lock(inode, NULL, 1);
		if (ret < 0) {
			mlog_errno(ret);
			goto out_sems;
		}

		ocfs2_inode_unlock(inode, 1);
	}

	can_do_direct = direct_io;
	ret = ocfs2_prepare_inode_for_write(file, ppos,
					    iocb->ki_left, appending,
					    &can_do_direct, &has_refcount);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * We can't complete the direct I/O as requested, fall back to
	 * buffered I/O.
	 */
	if (direct_io && !can_do_direct) {
		ocfs2_rw_unlock(inode, rw_level);
		inode_dio_done(inode);
		ocfs2_iocb_clear_sem_locked(iocb);

		have_alloc_sem = 0;
		rw_level = -1;

		direct_io = 0;
		goto relock;
	}

	/*
	 * To later detect whether a journal commit for sync writes is
	 * necessary, we sample i_size, and cluster count here.
	 */
	old_size = i_size_read(inode);
	old_clusters = OCFS2_I(inode)->ip_clusters;

	/* communicate with ocfs2_dio_end_io */
	ocfs2_iocb_set_rw_locked(iocb, rw_level);

	ret = generic_segment_checks(iov, &nr_segs, &ocount,
				     VERIFY_READ);
	if (ret)
		goto out_dio;

	count = ocount;
	ret = generic_write_checks(file, ppos, &count,
				   S_ISBLK(inode->i_mode));
	if (ret)
		goto out_dio;

	if (direct_io) {
		written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
						    ppos, count, ocount);
		if (written < 0) {
			ret = written;
			goto out_dio;
		}
	} else {
		current->backing_dev_info = file->f_mapping->backing_dev_info;
		written = generic_file_buffered_write(iocb, iov, nr_segs, *ppos,
						      ppos, count, written);
		current->backing_dev_info = NULL;
	}

out_dio:
	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));

	if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
	    ((file->f_flags & O_DIRECT) && !direct_io)) {
		ret = filemap_fdatawrite_range(file->f_mapping, pos,
					       pos + count - 1);
		if (ret < 0)
			written = ret;

		if (!ret && ((old_size != i_size_read(inode)) ||
			     (old_clusters != OCFS2_I(inode)->ip_clusters) ||
			     has_refcount)) {
			ret = jbd2_journal_force_commit(osb->journal->j_journal);
			if (ret < 0)
				written = ret;
		}

		if (!ret)
			ret = filemap_fdatawait_range(file->f_mapping, pos,
						      pos + count - 1);
	}

	/*
	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
	 * function pointer which is called when o_direct io completes so that
	 * it can unlock our rw lock.
	 * Unfortunately there are error cases which call end_io and others
	 * that don't. so we don't have to unlock the rw_lock if either an
	 * async dio is going to do it in the future or an end_io after an
	 * error has already done it.
	 */
	if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
		rw_level = -1;
		have_alloc_sem = 0;
	}

out:
	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);

out_sems:
	if (have_alloc_sem) {
		inode_dio_done(inode);
		ocfs2_iocb_clear_sem_locked(iocb);
	}

	mutex_unlock(&inode->i_mutex);

	if (written)
		ret = written;
	return ret;
}

static int ocfs2_splice_to_file(struct pipe_inode_info *pipe,
				struct file *out,
				struct splice_desc *sd)
{
	int ret;

	ret = ocfs2_prepare_inode_for_write(out, &sd->pos,
					    sd->total_len, 0, NULL, NULL);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	return splice_from_pipe_feed(pipe, sd, pipe_to_file);
}

static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
				       struct file *out,
				       loff_t *ppos,
				       size_t len,
				       unsigned int flags)
{
	int ret;
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};

	trace_ocfs2_file_splice_write(inode, out, out->f_path.dentry,
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			out->f_path.dentry->d_name.len,
			out->f_path.dentry->d_name.name, len);

	if (pipe->inode)
		mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_PARENT);

	splice_from_pipe_begin(&sd);
	do {
		ret = splice_from_pipe_next(pipe, &sd);
		if (ret <= 0)
			break;

		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
		ret = ocfs2_rw_lock(inode, 1);
		if (ret < 0)
			mlog_errno(ret);
		else {
			ret = ocfs2_splice_to_file(pipe, out, &sd);
			ocfs2_rw_unlock(inode, 1);
		}
		mutex_unlock(&inode->i_mutex);
	} while (ret > 0);
	splice_from_pipe_end(pipe, &sd);

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (sd.num_spliced)
		ret = sd.num_spliced;

	if (ret > 0) {
		unsigned long nr_pages;
		int err;

		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

		err = generic_write_sync(out, *ppos, ret);
		if (err)
			ret = err;
		else
			*ppos += ret;

		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
	}

	return ret;
}

static ssize_t ocfs2_file_splice_read(struct file *in,
				      loff_t *ppos,
				      struct pipe_inode_info *pipe,
				      size_t len,
				      unsigned int flags)
{
	int ret = 0, lock_level = 0;
	struct inode *inode = in->f_path.dentry->d_inode;

	trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry,
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			in->f_path.dentry->d_name.len,
			in->f_path.dentry->d_name.name, len);

	/*
	 * See the comment in ocfs2_file_aio_read()
	 */
	ret = ocfs2_inode_lock_atime(inode, in->f_vfsmnt, &lock_level);
	if (ret < 0) {
		mlog_errno(ret);
		goto bail;
	}
	ocfs2_inode_unlock(inode, lock_level);

	ret = generic_file_splice_read(in, ppos, pipe, len, flags);

bail:
	return ret;
}

static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
				   const struct iovec *iov,
				   unsigned long nr_segs,
				   loff_t pos)
{
	int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
	struct file *filp = iocb->ki_filp;
	struct inode *inode = filp->f_path.dentry->d_inode;

	trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry,
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			filp->f_path.dentry->d_name.len,
			filp->f_path.dentry->d_name.name, nr_segs);

	if (!inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto bail;
	}

	ocfs2_iocb_clear_sem_locked(iocb);

	/*
	 * buffered reads protect themselves in ->readpage().  O_DIRECT reads
	 * need locks to protect pending reads from racing with truncate.
	 */
	if (filp->f_flags & O_DIRECT) {
		have_alloc_sem = 1;
		atomic_inc(&inode->i_dio_count);
		ocfs2_iocb_set_sem_locked(iocb);

		ret = ocfs2_rw_lock(inode, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto bail;
		}
		rw_level = 0;
		/* communicate with ocfs2_dio_end_io */
		ocfs2_iocb_set_rw_locked(iocb, rw_level);
	}

	/*
	 * We're fine letting folks race truncates and extending
	 * writes with read across the cluster, just like they can
	 * locally. Hence no rw_lock during read.
	 *
	 * Take and drop the meta data lock to update inode fields
	 * like i_size. This allows the checks down below
	 * generic_file_aio_read() a chance of actually working.
	 */
	ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
	if (ret < 0) {
		mlog_errno(ret);
		goto bail;
	}
	ocfs2_inode_unlock(inode, lock_level);

	ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
	trace_generic_file_aio_read_ret(ret);

	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));

	/* see ocfs2_file_aio_write */
	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
		rw_level = -1;
		have_alloc_sem = 0;
	}

bail:
	if (have_alloc_sem) {
		inode_dio_done(inode);
		ocfs2_iocb_clear_sem_locked(iocb);
	}
	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);

	return ret;
}

const struct inode_operations ocfs2_file_iops = {
	.setattr	= ocfs2_setattr,
	.getattr	= ocfs2_getattr,
	.permission	= ocfs2_permission,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ocfs2_listxattr,
	.removexattr	= generic_removexattr,
	.fiemap		= ocfs2_fiemap,
	.check_acl	= ocfs2_check_acl,
};

const struct inode_operations ocfs2_special_file_iops = {
	.setattr	= ocfs2_setattr,
	.getattr	= ocfs2_getattr,
	.permission	= ocfs2_permission,
	.check_acl	= ocfs2_check_acl,
};

/*
 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
 */
const struct file_operations ocfs2_fops = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.mmap		= ocfs2_mmap,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_file_release,
	.open		= ocfs2_file_open,
	.aio_read	= ocfs2_file_aio_read,
	.aio_write	= ocfs2_file_aio_write,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = ocfs2_compat_ioctl,
#endif
	.lock		= ocfs2_lock,
	.flock		= ocfs2_flock,
	.splice_read	= ocfs2_file_splice_read,
	.splice_write	= ocfs2_file_splice_write,
	.fallocate	= ocfs2_fallocate,
};

const struct file_operations ocfs2_dops = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= ocfs2_readdir,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_dir_release,
	.open		= ocfs2_dir_open,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = ocfs2_compat_ioctl,
#endif
	.lock		= ocfs2_lock,
	.flock		= ocfs2_flock,
};

/*
 * POSIX-lockless variants of our file_operations.
 *
 * These will be used if the underlying cluster stack does not support
 * posix file locking, if the user passes the "localflocks" mount
 * option, or if we have a local-only fs.
 *
 * ocfs2_flock is in here because all stacks handle UNIX file locks,
 * so we still want it in the case of no stack support for
 * plocks. Internally, it will do the right thing when asked to ignore
 * flock requests that are unsupported or otherwise malformed.
 */
const struct file_operations ocfs2_fops_no_plocks = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.mmap		= ocfs2_mmap,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_file_release,
	.open		= ocfs2_file_open,
	.aio_read	= ocfs2_file_aio_read,
	.aio_write	= ocfs2_file_aio_write,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
	.splice_read	= ocfs2_file_splice_read,
	.splice_write	= ocfs2_file_splice_write,
	.fallocate	= ocfs2_fallocate,
};

const struct file_operations ocfs2_dops_no_plocks = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= ocfs2_readdir,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_dir_release,
	.open		= ocfs2_dir_open,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
};