direct-io: only inc/dec inode->i_dio_count for file systems
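
With this change ext4 no longer touches inode->i_dio_count directly; it calls the inode_dio_begin()/inode_dio_end() helpers instead, and the data direction is derived from the iterator via iov_iter_rw(iter) rather than a separate rw argument. As a minimal sketch (not part of this diff, and assuming the helpers simply wrap the i_dio_count accounting that was previously open-coded, per their include/linux/fs.h definitions), the pair looks roughly like:

  /*
   * Sketch of the assumed helper pair: inode_dio_begin() takes an extra
   * direct-I/O reference on the inode, inode_dio_end() drops it and wakes
   * anyone sleeping in inode_dio_wait().
   */
  static inline void inode_dio_begin(struct inode *inode)
  {
          atomic_inc(&inode->i_dio_count);
  }

  static inline void inode_dio_end(struct inode *inode)
  {
          if (atomic_dec_and_test(&inode->i_dio_count))
                  wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
  }
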
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 45fe924f82bce2ff76e3e74b45ec1833729433ea..9588240195090ce4216e87f07379121b4ffe6cfb 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -20,9 +20,9 @@
  *     (sct@redhat.com), 1993, 1998
  */
 
-#include <linux/aio.h>
 #include "ext4_jbd2.h"
 #include "truncate.h"
+#include <linux/uio.h>
 
 #include <trace/events/ext4.h>
 
@@ -642,8 +642,8 @@ out:
  * crashes then stale disk data _may_ be exposed inside the file. But current
  * VFS code falls back into buffered path in that case so we are safe.
  */
-ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
-                          struct iov_iter *iter, loff_t offset)
+ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                          loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -654,7 +654,7 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
        size_t count = iov_iter_count(iter);
        int retries = 0;
 
-       if (rw == WRITE) {
+       if (iov_iter_rw(iter) == WRITE) {
                loff_t final_size = offset + count;
 
                if (final_size > inode->i_size) {
@@ -676,37 +676,38 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
        }
 
 retry:
-       if (rw == READ && ext4_should_dioread_nolock(inode)) {
+       if (iov_iter_rw(iter) == READ && ext4_should_dioread_nolock(inode)) {
                /*
                 * Nolock dioread optimization may be dynamically disabled
                 * via ext4_inode_block_unlocked_dio(). Check inode's state
                 * while holding extra i_dio_count ref.
                 */
-               atomic_inc(&inode->i_dio_count);
+               inode_dio_begin(inode);
                smp_mb();
                if (unlikely(ext4_test_inode_state(inode,
                                                    EXT4_STATE_DIOREAD_LOCK))) {
-                       inode_dio_done(inode);
+                       inode_dio_end(inode);
                        goto locked;
                }
                if (IS_DAX(inode))
-                       ret = dax_do_io(rw, iocb, inode, iter, offset,
+                       ret = dax_do_io(iocb, inode, iter, offset,
                                        ext4_get_block, NULL, 0);
                else
-                       ret = __blockdev_direct_IO(rw, iocb, inode,
-                                       inode->i_sb->s_bdev, iter, offset,
-                                       ext4_get_block, NULL, NULL, 0);
-               inode_dio_done(inode);
+                       ret = __blockdev_direct_IO(iocb, inode,
+                                                  inode->i_sb->s_bdev, iter,
+                                                  offset, ext4_get_block, NULL,
+                                                  NULL, 0);
+               inode_dio_end(inode);
        } else {
 locked:
                if (IS_DAX(inode))
-                       ret = dax_do_io(rw, iocb, inode, iter, offset,
+                       ret = dax_do_io(iocb, inode, iter, offset,
                                        ext4_get_block, NULL, DIO_LOCKING);
                else
-                       ret = blockdev_direct_IO(rw, iocb, inode, iter,
-                                       offset, ext4_get_block);
+                       ret = blockdev_direct_IO(iocb, inode, iter, offset,
+                                                ext4_get_block);
 
-               if (unlikely((rw & WRITE) && ret < 0)) {
+               if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                        loff_t isize = i_size_read(inode);
                        loff_t end = offset + count;
 