/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * See COPYING in top-level directory.
 */

/*
 *  Linux VFS file operations.
 */
12 #include "pvfs2-kernel.h"
13 #include "pvfs2-bufmap.h"
15 #include <linux/pagemap.h>
/*
 * Mark an op's I/O as complete (under op->lock) and wake the client-core
 * daemon sleeping on the op's completion waitqueue.
 *
 * Wrapped in do { } while (0) so the multi-statement macro expands safely
 * inside unbraced if/else bodies.
 */
#define wake_up_daemon_for_return(op)			\
do {							\
	spin_lock(&op->lock);				\
	op->io_completed = 1;				\
	spin_unlock(&op->lock);				\
	wake_up_interruptible(&op->io_completion_waitq);\
} while (0)
26 * Copy to client-core's address space from the buffers specified
27 * by the iovec upto total_size bytes.
28 * NOTE: the iovector can either contain addresses which
29 * can futher be kernel-space or user-space addresses.
30 * or it can pointers to struct page's
32 static int precopy_buffers(struct pvfs2_bufmap
*bufmap
,
34 struct iov_iter
*iter
,
39 * copy data from application/kernel by pulling it out
45 ret
= pvfs_bufmap_copy_from_iovec(bufmap
,
50 gossip_err("%s: Failed to copy-in buffers. Please make sure that the pvfs2-client is running. %ld\n",
56 gossip_err("%s: Failed to copy-in buffers. Please make sure that the pvfs2-client is running. %ld\n",
63 * Copy from client-core's address space to the buffers specified
64 * by the iovec upto total_size bytes.
65 * NOTE: the iovector can either contain addresses which
66 * can futher be kernel-space or user-space addresses.
67 * or it can pointers to struct page's
69 static int postcopy_buffers(struct pvfs2_bufmap
*bufmap
,
71 struct iov_iter
*iter
,
76 * copy data to application/kernel by pushing it out to
77 * the iovec. NOTE; target buffers can be addresses or
78 * struct page pointers.
81 ret
= pvfs_bufmap_copy_to_iovec(bufmap
,
86 gossip_err("%s: Failed to copy-out buffers. Please make sure that the pvfs2-client is running (%ld)\n",
94 * Post and wait for the I/O upcall to finish
96 static ssize_t
wait_for_direct_io(enum PVFS_io_type type
, struct inode
*inode
,
97 loff_t
*offset
, struct iov_iter
*iter
,
98 size_t total_size
, loff_t readahead_size
)
100 struct pvfs2_inode_s
*pvfs2_inode
= PVFS2_I(inode
);
101 struct pvfs2_khandle
*handle
= &pvfs2_inode
->refn
.khandle
;
102 struct pvfs2_bufmap
*bufmap
= NULL
;
103 struct pvfs2_kernel_op_s
*new_op
= NULL
;
104 int buffer_index
= -1;
107 new_op
= op_alloc(PVFS2_VFS_OP_FILE_IO
);
112 /* synchronous I/O */
113 new_op
->upcall
.req
.io
.async_vfs_io
= PVFS_VFS_SYNC_IO
;
114 new_op
->upcall
.req
.io
.readahead_size
= readahead_size
;
115 new_op
->upcall
.req
.io
.io_type
= type
;
116 new_op
->upcall
.req
.io
.refn
= pvfs2_inode
->refn
;
118 populate_shared_memory
:
119 /* get a shared buffer index */
120 ret
= pvfs_bufmap_get(&bufmap
, &buffer_index
);
122 gossip_debug(GOSSIP_FILE_DEBUG
,
123 "%s: pvfs_bufmap_get failure (%ld)\n",
124 __func__
, (long)ret
);
127 gossip_debug(GOSSIP_FILE_DEBUG
,
128 "%s(%pU): GET op %p -> buffer_index %d\n",
134 new_op
->uses_shared_memory
= 1;
135 new_op
->upcall
.req
.io
.buf_index
= buffer_index
;
136 new_op
->upcall
.req
.io
.count
= total_size
;
137 new_op
->upcall
.req
.io
.offset
= *offset
;
139 gossip_debug(GOSSIP_FILE_DEBUG
,
140 "%s(%pU): offset: %llu total_size: %zd\n",
146 * Stage 1: copy the buffers into client-core's address space
147 * precopy_buffers only pertains to writes.
149 if (type
== PVFS_IO_WRITE
) {
150 ret
= precopy_buffers(bufmap
,
158 gossip_debug(GOSSIP_FILE_DEBUG
,
159 "%s(%pU): Calling post_io_request with tag (%llu)\n",
164 /* Stage 2: Service the I/O operation */
165 ret
= service_operation(new_op
,
166 type
== PVFS_IO_WRITE
?
169 get_interruptible_flag(inode
));
172 * If service_operation() returns -EAGAIN #and# the operation was
173 * purged from pvfs2_request_list or htable_ops_in_progress, then
174 * we know that the client was restarted, causing the shared memory
175 * area to be wiped clean. To restart a write operation in this
176 * case, we must re-copy the data from the user's iovec to a NEW
177 * shared memory location. To restart a read operation, we must get
178 * a new shared memory location.
180 if (ret
== -EAGAIN
&& op_state_purged(new_op
)) {
181 pvfs_bufmap_put(bufmap
, buffer_index
);
182 gossip_debug(GOSSIP_FILE_DEBUG
,
183 "%s:going to repopulate_shared_memory.\n",
185 goto populate_shared_memory
;
189 handle_io_error(); /* defined in pvfs2-kernel.h */
191 * don't write an error to syslog on signaled operation
192 * termination unless we've got debugging turned on, as
193 * this can happen regularly (i.e. ctrl-c)
196 gossip_debug(GOSSIP_FILE_DEBUG
,
197 "%s: returning error %ld\n", __func__
,
200 gossip_err("%s: error in %s handle %pU, returning %zd\n",
202 type
== PVFS_IO_READ
?
203 "read from" : "write to",
209 * Stage 3: Post copy buffers from client-core's address space
210 * postcopy_buffers only pertains to reads.
212 if (type
== PVFS_IO_READ
) {
213 ret
= postcopy_buffers(bufmap
,
216 new_op
->downcall
.resp
.io
.amt_complete
);
219 * put error codes in downcall so that handle_io_error()
220 * preserves it properly
222 new_op
->downcall
.status
= ret
;
227 gossip_debug(GOSSIP_FILE_DEBUG
,
228 "%s(%pU): Amount written as returned by the sys-io call:%d\n",
231 (int)new_op
->downcall
.resp
.io
.amt_complete
);
233 ret
= new_op
->downcall
.resp
.io
.amt_complete
;
236 * tell the device file owner waiting on I/O that this read has
237 * completed and it can return now. in this exact case, on
238 * wakeup the daemon will free the op, so we *cannot* touch it
241 wake_up_daemon_for_return(new_op
);
245 if (buffer_index
>= 0) {
246 pvfs_bufmap_put(bufmap
, buffer_index
);
247 gossip_debug(GOSSIP_FILE_DEBUG
,
248 "%s(%pU): PUT buffer_index %d\n",
249 __func__
, handle
, buffer_index
);
260 * The reason we need to do this is to be able to support readv and writev
261 * that are larger than (pvfs_bufmap_size_query()) Default is
262 * PVFS2_BUFMAP_DEFAULT_DESC_SIZE MB. What that means is that we will
263 * create a new io vec descriptor for those memory addresses that
264 * go beyond the limit. Return value for this routine is negative in case
265 * of errors and 0 in case of success.
267 * Further, the new_nr_segs pointer is updated to hold the new value
268 * of number of iovecs, the new_vec pointer is updated to hold the pointer
269 * to the new split iovec, and the size array is an array of integers holding
270 * the number of iovecs that straddle pvfs_bufmap_size_query().
271 * The max_new_nr_segs value is computed by the caller and returned.
272 * (It will be (count of all iov_len/ block_size) + 1).
274 static int split_iovecs(unsigned long max_new_nr_segs
, /* IN */
275 unsigned long nr_segs
, /* IN */
276 const struct iovec
*original_iovec
, /* IN */
277 unsigned long *new_nr_segs
, /* OUT */
278 struct iovec
**new_vec
, /* OUT */
279 unsigned long *seg_count
, /* OUT */
280 unsigned long **seg_array
) /* OUT */
283 unsigned long count
= 0;
284 unsigned long begin_seg
;
285 unsigned long tmpnew_nr_segs
= 0;
286 struct iovec
*new_iovec
= NULL
;
287 struct iovec
*orig_iovec
;
288 unsigned long *sizes
= NULL
;
289 unsigned long sizes_count
= 0;
292 original_iovec
== NULL
||
293 new_nr_segs
== NULL
||
297 max_new_nr_segs
<= 0) {
298 gossip_err("Invalid parameters to split_iovecs\n");
305 /* copy the passed in iovec descriptor to a temp structure */
306 orig_iovec
= kmalloc_array(nr_segs
,
308 PVFS2_BUFMAP_GFP_FLAGS
);
309 if (orig_iovec
== NULL
) {
311 "split_iovecs: Could not allocate memory for %lu bytes!\n",
312 (unsigned long)(nr_segs
* sizeof(*orig_iovec
)));
315 new_iovec
= kcalloc(max_new_nr_segs
,
317 PVFS2_BUFMAP_GFP_FLAGS
);
318 if (new_iovec
== NULL
) {
321 "split_iovecs: Could not allocate memory for %lu bytes!\n",
322 (unsigned long)(max_new_nr_segs
* sizeof(*new_iovec
)));
325 sizes
= kcalloc(max_new_nr_segs
,
327 PVFS2_BUFMAP_GFP_FLAGS
);
332 "split_iovecs: Could not allocate memory for %lu bytes!\n",
333 (unsigned long)(max_new_nr_segs
* sizeof(*sizes
)));
336 /* copy the passed in iovec to a temp structure */
337 memcpy(orig_iovec
, original_iovec
, nr_segs
* sizeof(*orig_iovec
));
340 for (seg
= begin_seg
; seg
< nr_segs
; seg
++) {
341 if (tmpnew_nr_segs
>= max_new_nr_segs
||
342 sizes_count
>= max_new_nr_segs
) {
347 ("split_iovecs: exceeded the index limit (%lu)\n",
351 if (count
+ orig_iovec
[seg
].iov_len
<
352 pvfs_bufmap_size_query()) {
353 count
+= orig_iovec
[seg
].iov_len
;
354 memcpy(&new_iovec
[tmpnew_nr_segs
],
358 sizes
[sizes_count
]++;
360 new_iovec
[tmpnew_nr_segs
].iov_base
=
361 orig_iovec
[seg
].iov_base
;
362 new_iovec
[tmpnew_nr_segs
].iov_len
=
363 (pvfs_bufmap_size_query() - count
);
365 sizes
[sizes_count
]++;
368 orig_iovec
[seg
].iov_base
+=
369 (pvfs_bufmap_size_query() - count
);
370 orig_iovec
[seg
].iov_len
-=
371 (pvfs_bufmap_size_query() - count
);
381 *new_nr_segs
= tmpnew_nr_segs
;
382 /* new_iovec is freed by the caller */
383 *new_vec
= new_iovec
;
384 *seg_count
= sizes_count
;
385 /* seg_array is also freed by the caller */
391 static long bound_max_iovecs(const struct iovec
*curr
, unsigned long nr_segs
,
392 ssize_t
*total_count
)
402 for (i
= 0; i
< nr_segs
; i
++) {
403 const struct iovec
*iv
= &curr
[i
];
405 count
+= iv
->iov_len
;
406 if (unlikely((ssize_t
) (count
| iv
->iov_len
) < 0))
408 if (total
+ iv
->iov_len
< pvfs_bufmap_size_query()) {
409 total
+= iv
->iov_len
;
413 (total
+ iv
->iov_len
- pvfs_bufmap_size_query());
414 max_nr_iovecs
+= (total
/ pvfs_bufmap_size_query() + 2);
417 *total_count
= count
;
418 return max_nr_iovecs
;
422 * Common entry point for read/write/readv/writev
423 * This function will dispatch it to either the direct I/O
424 * or buffered I/O path depending on the mount options and/or
425 * augmented/extended metadata attached to the file.
426 * Note: File extended attributes override any mount options.
428 static ssize_t
do_readv_writev(enum PVFS_io_type type
, struct file
*file
,
429 loff_t
*offset
, const struct iovec
*iov
, unsigned long nr_segs
)
431 struct inode
*inode
= file
->f_mapping
->host
;
432 struct pvfs2_inode_s
*pvfs2_inode
= PVFS2_I(inode
);
433 struct pvfs2_khandle
*handle
= &pvfs2_inode
->refn
.khandle
;
436 unsigned int to_free
;
439 unsigned long new_nr_segs
;
440 unsigned long max_new_nr_segs
;
441 unsigned long seg_count
;
442 unsigned long *seg_array
;
443 struct iovec
*iovecptr
;
451 /* Compute total and max number of segments after split */
452 max_new_nr_segs
= bound_max_iovecs(iov
, nr_segs
, &count
);
454 gossip_debug(GOSSIP_FILE_DEBUG
,
455 "%s-BEGIN(%pU): count(%d) after estimate_max_iovecs.\n",
460 if (type
== PVFS_IO_WRITE
) {
461 gossip_debug(GOSSIP_FILE_DEBUG
,
462 "%s(%pU): proceeding with offset : %llu, "
476 * if the total size of data transfer requested is greater than
477 * the kernel-set blocksize of PVFS2, then we split the iovecs
478 * such that no iovec description straddles a block size limit
481 gossip_debug(GOSSIP_FILE_DEBUG
,
482 "%s: pvfs_bufmap_size:%d\n",
484 pvfs_bufmap_size_query());
486 if (count
> pvfs_bufmap_size_query()) {
488 * Split up the given iovec description such that
489 * no iovec descriptor straddles over the block-size limitation.
490 * This makes us our job easier to stage the I/O.
491 * In addition, this function will also compute an array
492 * with seg_count entries that will store the number of
493 * segments that straddle the block-size boundaries.
495 ret
= split_iovecs(max_new_nr_segs
, /* IN */
498 &new_nr_segs
, /* OUT */
500 &seg_count
, /* OUT */
501 &seg_array
); /* OUT */
503 gossip_err("%s: Failed to split iovecs to satisfy larger than blocksize readv/writev request %zd\n",
508 gossip_debug(GOSSIP_FILE_DEBUG
,
509 "%s: Splitting iovecs from %lu to %lu"
515 /* We must free seg_array and iovecptr */
518 new_nr_segs
= nr_segs
;
519 /* use the given iovec description */
520 iovecptr
= (struct iovec
*)iov
;
521 /* There is only 1 element in the seg_array */
523 /* and its value is the number of segments passed in */
524 seg_array
= &nr_segs
;
525 /* We dont have to free up anything */
530 gossip_debug(GOSSIP_FILE_DEBUG
,
531 "%s(%pU) %zd@%llu\n",
536 gossip_debug(GOSSIP_FILE_DEBUG
,
537 "%s(%pU): new_nr_segs: %lu, seg_count: %lu\n",
540 new_nr_segs
, seg_count
);
542 /* PVFS2_KERNEL_DEBUG is a CFLAGS define. */
543 #ifdef PVFS2_KERNEL_DEBUG
544 for (seg
= 0; seg
< new_nr_segs
; seg
++)
545 gossip_debug(GOSSIP_FILE_DEBUG
,
546 "%s: %d) %p to %p [%d bytes]\n",
549 iovecptr
[seg
].iov_base
,
550 iovecptr
[seg
].iov_base
+ iovecptr
[seg
].iov_len
,
551 (int)iovecptr
[seg
].iov_len
);
552 for (seg
= 0; seg
< seg_count
; seg
++)
553 gossip_debug(GOSSIP_FILE_DEBUG
,
560 while (total_count
< count
) {
561 struct iov_iter iter
;
565 /* how much to transfer in this loop iteration */
567 (((count
- total_count
) > pvfs_bufmap_size_query()) ?
568 pvfs_bufmap_size_query() :
569 (count
- total_count
));
571 gossip_debug(GOSSIP_FILE_DEBUG
,
572 "%s(%pU): size of each_count(%d)\n",
576 gossip_debug(GOSSIP_FILE_DEBUG
,
577 "%s(%pU): BEFORE wait_for_io: offset is %d\n",
582 iov_iter_init(&iter
, type
== PVFS_IO_READ
? READ
: WRITE
,
583 ptr
, seg_array
[seg
], each_count
);
585 ret
= wait_for_direct_io(type
, inode
, offset
, &iter
,
587 gossip_debug(GOSSIP_FILE_DEBUG
,
588 "%s(%pU): return from wait_for_io:%d\n",
596 /* advance the iovec pointer */
597 ptr
+= seg_array
[seg
];
603 gossip_debug(GOSSIP_FILE_DEBUG
,
604 "%s(%pU): AFTER wait_for_io: offset is %d\n",
610 * if we got a short I/O operations,
611 * fall out and return what we got so far
613 if (amt_complete
< each_count
)
625 if (type
== PVFS_IO_READ
) {
628 SetMtimeFlag(pvfs2_inode
);
629 inode
->i_mtime
= CURRENT_TIME
;
630 mark_inode_dirty_sync(inode
);
634 gossip_debug(GOSSIP_FILE_DEBUG
,
635 "%s(%pU): Value(%d) returned.\n",
644 * Read data from a specified offset in a file (referenced by inode).
645 * Data may be placed either in a user or kernel buffer.
647 ssize_t
pvfs2_inode_read(struct inode
*inode
,
651 loff_t readahead_size
)
653 struct pvfs2_inode_s
*pvfs2_inode
= PVFS2_I(inode
);
656 struct iov_iter iter
;
657 ssize_t ret
= -EINVAL
;
659 g_pvfs2_stats
.reads
++;
664 bufmap_size
= pvfs_bufmap_size_query();
665 if (count
> bufmap_size
) {
666 gossip_debug(GOSSIP_FILE_DEBUG
,
667 "%s: count is too large (%zd/%zd)!\n",
668 __func__
, count
, bufmap_size
);
672 gossip_debug(GOSSIP_FILE_DEBUG
,
673 "%s(%pU) %zd@%llu\n",
675 &pvfs2_inode
->refn
.khandle
,
679 iov_iter_init(&iter
, READ
, &vec
, 1, count
);
680 ret
= wait_for_direct_io(PVFS_IO_READ
, inode
, offset
, &iter
,
681 count
, readahead_size
);
685 gossip_debug(GOSSIP_FILE_DEBUG
,
686 "%s(%pU): Value(%zd) returned.\n",
688 &pvfs2_inode
->refn
.khandle
,
694 static ssize_t
pvfs2_file_read_iter(struct kiocb
*iocb
, struct iov_iter
*iter
)
696 struct file
*file
= iocb
->ki_filp
;
697 loff_t pos
= *(&iocb
->ki_pos
);
699 unsigned long nr_segs
= iter
->nr_segs
;
701 BUG_ON(iocb
->private);
703 gossip_debug(GOSSIP_FILE_DEBUG
, "pvfs2_file_read_iter\n");
705 g_pvfs2_stats
.reads
++;
707 rc
= do_readv_writev(PVFS_IO_READ
,
717 static ssize_t
pvfs2_file_write_iter(struct kiocb
*iocb
, struct iov_iter
*iter
)
719 struct file
*file
= iocb
->ki_filp
;
720 loff_t pos
= *(&iocb
->ki_pos
);
721 unsigned long nr_segs
= iter
->nr_segs
;
724 BUG_ON(iocb
->private);
726 gossip_debug(GOSSIP_FILE_DEBUG
, "pvfs2_file_write_iter\n");
728 mutex_lock(&file
->f_mapping
->host
->i_mutex
);
730 /* Make sure generic_write_checks sees an up to date inode size. */
731 if (file
->f_flags
& O_APPEND
) {
732 rc
= pvfs2_inode_getattr(file
->f_mapping
->host
,
735 gossip_err("%s: pvfs2_inode_getattr failed, rc:%zd:.\n",
741 if (file
->f_pos
> i_size_read(file
->f_mapping
->host
))
742 pvfs2_i_size_write(file
->f_mapping
->host
, file
->f_pos
);
744 rc
= generic_write_checks(iocb
, iter
);
747 gossip_err("%s: generic_write_checks failed, rc:%zd:.\n",
752 rc
= do_readv_writev(PVFS_IO_WRITE
,
758 gossip_err("%s: do_readv_writev failed, rc:%zd:.\n",
764 g_pvfs2_stats
.writes
++;
768 mutex_unlock(&file
->f_mapping
->host
->i_mutex
);
773 * Perform a miscellaneous operation on a file.
775 static long pvfs2_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
781 gossip_debug(GOSSIP_FILE_DEBUG
,
782 "pvfs2_ioctl: called with cmd %d\n",
786 * we understand some general ioctls on files, such as the immutable
789 if (cmd
== FS_IOC_GETFLAGS
) {
791 ret
= pvfs2_xattr_get_default(file
->f_path
.dentry
,
792 "user.pvfs2.meta_hint",
796 if (ret
< 0 && ret
!= -ENODATA
)
798 else if (ret
== -ENODATA
)
801 gossip_debug(GOSSIP_FILE_DEBUG
,
802 "pvfs2_ioctl: FS_IOC_GETFLAGS: %llu\n",
803 (unsigned long long)uval
);
804 return put_user(uval
, (int __user
*)arg
);
805 } else if (cmd
== FS_IOC_SETFLAGS
) {
807 if (get_user(uval
, (int __user
*)arg
))
810 * PVFS_MIRROR_FL is set internally when the mirroring mode
811 * is turned on for a file. The user is not allowed to turn
812 * on this bit, but the bit is present if the user first gets
813 * the flags and then updates the flags with some new
814 * settings. So, we ignore it in the following edit. bligon.
816 if ((uval
& ~PVFS_MIRROR_FL
) &
817 (~(FS_IMMUTABLE_FL
| FS_APPEND_FL
| FS_NOATIME_FL
))) {
818 gossip_err("pvfs2_ioctl: the FS_IOC_SETFLAGS only supports setting one of FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NOATIME_FL\n");
822 gossip_debug(GOSSIP_FILE_DEBUG
,
823 "pvfs2_ioctl: FS_IOC_SETFLAGS: %llu\n",
824 (unsigned long long)val
);
825 ret
= pvfs2_xattr_set_default(file
->f_path
.dentry
,
826 "user.pvfs2.meta_hint",
837 * Memory map a region of a file.
839 static int pvfs2_file_mmap(struct file
*file
, struct vm_area_struct
*vma
)
841 gossip_debug(GOSSIP_FILE_DEBUG
,
842 "pvfs2_file_mmap: called on %s\n",
844 (char *)file
->f_path
.dentry
->d_name
.name
:
847 /* set the sequential readahead hint */
848 vma
->vm_flags
|= VM_SEQ_READ
;
849 vma
->vm_flags
&= ~VM_RAND_READ
;
851 /* Use readonly mmap since we cannot support writable maps. */
852 return generic_file_readonly_mmap(file
, vma
);
/* Number of pages currently held in an address_space's page cache. */
#define mapping_nrpages(idata) ((idata)->nrpages)
858 * Called to notify the module that there are no more references to
859 * this file (i.e. no processes have it open).
861 * \note Not called when each file is closed.
863 static int pvfs2_file_release(struct inode
*inode
, struct file
*file
)
865 gossip_debug(GOSSIP_FILE_DEBUG
,
866 "pvfs2_file_release: called on %s\n",
867 file
->f_path
.dentry
->d_name
.name
);
869 pvfs2_flush_inode(inode
);
872 * remove all associated inode pages from the page cache and mmap
873 * readahead cache (if any); this forces an expensive refresh of
874 * data for the next caller of mmap (or 'get_block' accesses)
876 if (file
->f_path
.dentry
->d_inode
&&
877 file
->f_path
.dentry
->d_inode
->i_mapping
&&
878 mapping_nrpages(&file
->f_path
.dentry
->d_inode
->i_data
))
879 truncate_inode_pages(file
->f_path
.dentry
->d_inode
->i_mapping
,
885 * Push all data for a specific file onto permanent storage.
887 static int pvfs2_fsync(struct file
*file
,
893 struct pvfs2_inode_s
*pvfs2_inode
=
894 PVFS2_I(file
->f_path
.dentry
->d_inode
);
895 struct pvfs2_kernel_op_s
*new_op
= NULL
;
898 filemap_write_and_wait_range(file
->f_mapping
, start
, end
);
900 new_op
= op_alloc(PVFS2_VFS_OP_FSYNC
);
903 new_op
->upcall
.req
.fsync
.refn
= pvfs2_inode
->refn
;
905 ret
= service_operation(new_op
,
907 get_interruptible_flag(file
->f_path
.dentry
->d_inode
));
909 gossip_debug(GOSSIP_FILE_DEBUG
,
910 "pvfs2_fsync got return value of %d\n",
915 pvfs2_flush_inode(file
->f_path
.dentry
->d_inode
);
920 * Change the file pointer position for an instance of an open file.
922 * \note If .llseek is overriden, we must acquire lock as described in
923 * Documentation/filesystems/Locking.
925 * Future upgrade could support SEEK_DATA and SEEK_HOLE but would
926 * require much changes to the FS
928 static loff_t
pvfs2_file_llseek(struct file
*file
, loff_t offset
, int origin
)
931 struct inode
*inode
= file
->f_path
.dentry
->d_inode
;
934 gossip_err("pvfs2_file_llseek: invalid inode (NULL)\n");
938 if (origin
== PVFS2_SEEK_END
) {
940 * revalidate the inode's file size.
941 * NOTE: We are only interested in file size here,
942 * so we set mask accordingly.
944 ret
= pvfs2_inode_getattr(inode
, PVFS_ATTR_SYS_SIZE
);
946 gossip_debug(GOSSIP_FILE_DEBUG
,
947 "%s:%s:%d calling make bad inode\n",
951 pvfs2_make_bad_inode(inode
);
956 gossip_debug(GOSSIP_FILE_DEBUG
,
957 "pvfs2_file_llseek: offset is %ld | origin is %d"
958 " | inode size is %lu\n",
961 (unsigned long)file
->f_path
.dentry
->d_inode
->i_size
);
963 return generic_file_llseek(file
, offset
, origin
);
967 * Support local locks (locks that only this kernel knows about)
968 * if Orangefs was mounted -o local_lock.
970 static int pvfs2_lock(struct file
*filp
, int cmd
, struct file_lock
*fl
)
974 if (PVFS2_SB(filp
->f_inode
->i_sb
)->flags
& PVFS2_OPT_LOCAL_LOCK
) {
975 if (cmd
== F_GETLK
) {
977 posix_test_lock(filp
, fl
);
979 rc
= posix_lock_file(filp
, fl
, NULL
);
986 /** PVFS2 implementation of VFS file operations */
987 const struct file_operations pvfs2_file_operations
= {
988 .llseek
= pvfs2_file_llseek
,
989 .read_iter
= pvfs2_file_read_iter
,
990 .write_iter
= pvfs2_file_write_iter
,
992 .unlocked_ioctl
= pvfs2_ioctl
,
993 .mmap
= pvfs2_file_mmap
,
994 .open
= generic_file_open
,
995 .release
= pvfs2_file_release
,
996 .fsync
= pvfs2_fsync
,