[XFS] kill the v_flag member in struct bhv_vnode
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 4475588e973a4aa922144c80b39df5948b5199ef..22a40bd0cce29bc66e0370b38dbe658eefd56d1d 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -37,6 +37,7 @@
 #include "xfs_error.h"
 #include "xfs_rw.h"
 #include "xfs_iomap.h"
+#include "xfs_vnodeops.h"
 #include <linux/mpage.h>
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
@@ -108,14 +109,19 @@ xfs_page_trace(
 
 /*
  * Schedule IO completion handling on an xfsdatad if this was
- * the final hold on this ioend.
+ * the final hold on this ioend. If we are asked to wait,
+ * flush the workqueue.
  */
 STATIC void
 xfs_finish_ioend(
-       xfs_ioend_t             *ioend)
+       xfs_ioend_t     *ioend,
+       int             wait)
 {
-       if (atomic_dec_and_test(&ioend->io_remaining))
+       if (atomic_dec_and_test(&ioend->io_remaining)) {
                queue_work(xfsdatad_workqueue, &ioend->io_work);
+               if (wait)
+                       flush_workqueue(xfsdatad_workqueue);
+       }
 }
 
 /*
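The new wait argument is the heart of this hunk: flush_workqueue() does not return until every work item queued so far has run, so a caller that passes wait = 1 is guaranteed the completion handler has finished before it proceeds. A minimal sketch of the pattern, with hypothetical my_wq and my_ioend standing in for the XFS types (the workqueue calls match the 2.6-era kernel API used in this file):

    /*
     * Sketch only: my_wq, my_ioend and my_finish_ioend are invented
     * names, not the XFS ones.
     */
    #include <linux/workqueue.h>
    #include <asm/atomic.h>

    struct my_ioend {
            atomic_t                io_remaining;   /* outstanding holds */
            struct work_struct      io_work;        /* completion handler */
    };

    extern struct workqueue_struct *my_wq;  /* assumed created elsewhere */

    static void my_finish_ioend(struct my_ioend *ioend, int wait)
    {
            /* Only the caller dropping the final hold queues completion. */
            if (atomic_dec_and_test(&ioend->io_remaining)) {
                    queue_work(my_wq, &ioend->io_work);
                    /*
                     * flush_workqueue() returns only after every item
                     * queued so far has executed, so a synchronous caller
                     * sees the handler's effects before continuing.
                     */
                    if (wait)
                            flush_workqueue(my_wq);
            }
    }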
@@ -156,6 +162,8 @@ xfs_setfilesize(
        xfs_fsize_t             bsize;
 
        ip = xfs_vtoi(ioend->io_vnode);
+       if (!ip)
+               return;
 
        ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
        ASSERT(ioend->io_type != IOMAP_READ);
@@ -174,6 +182,7 @@ xfs_setfilesize(
                ip->i_d.di_size = isize;
                ip->i_update_core = 1;
                ip->i_update_size = 1;
+               mark_inode_dirty_sync(vn_to_inode(ioend->io_vnode));
        }
 
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
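The added mark_inode_dirty_sync() is what actually schedules the size update for writeback: changing ip->i_d.di_size touches only XFS's in-core copy, and without dirtying the Linux inode nothing would prompt the VFS to call the filesystem's write_inode method, so the new size could be lost after a crash. A hedged sketch of the idea against the generic VFS interfaces (the helper name is illustrative):

    #include <linux/fs.h>

    /*
     * Illustrative helper: extend the in-core size, then tell the VFS.
     * The caller is assumed to hold whatever lock serialises size
     * updates (XFS holds the inode lock exclusively in the hunk above).
     */
    static void my_update_isize(struct inode *inode, loff_t new_size)
    {
            i_size_write(inode, new_size);
            /*
             * Without this call the size change exists only in memory;
             * mark_inode_dirty_sync() puts the inode on the dirty list
             * so writeback will push the metadata to disk.
             */
            mark_inode_dirty_sync(inode);
    }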
@@ -224,7 +233,8 @@ xfs_end_bio_unwritten(
        size_t                  size = ioend->io_size;
 
        if (likely(!ioend->io_error)) {
-               bhv_vop_bmap(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL);
+               xfs_bmap(xfs_vtoi(vp), offset, size,
+                               BMAPI_UNWRITTEN, NULL, NULL);
                xfs_setfilesize(ioend);
        }
        xfs_destroy_ioend(ioend);
@@ -294,12 +304,13 @@ xfs_map_blocks(
        xfs_iomap_t             *mapp,
        int                     flags)
 {
-       bhv_vnode_t             *vp = vn_from_inode(inode);
+       xfs_inode_t             *ip = XFS_I(inode);
        int                     error, nmaps = 1;
 
-       error = bhv_vop_bmap(vp, offset, count, flags, mapp, &nmaps);
+       error = xfs_bmap(ip, offset, count,
+                               flags, mapp, &nmaps);
        if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
-               VMODIFY(vp);
+               xfs_iflags_set(ip, XFS_IMODIFIED);
        return -error;
 }
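Replacing VMODIFY() with xfs_iflags_set() is the point of the whole series: state that used to live in the behaviour-layer vnode's v_flag now lives in flag bits on the XFS inode itself. The underlying pattern is a flag word updated under a spinlock; a sketch with illustrative field names (the real xfs_inode layout may differ in detail):

    #include <linux/spinlock.h>

    /* Hypothetical mirror of the inode flag helpers; field names are
     * illustrative rather than copied from the XFS headers. */
    struct my_inode {
            spinlock_t      i_flags_lock;   /* protects i_flags */
            unsigned short  i_flags;        /* MY_IMODIFIED etc. */
    };

    #define MY_IMODIFIED    0x0001  /* stand-in for XFS_IMODIFIED */

    static void my_iflags_set(struct my_inode *ip, unsigned short flags)
    {
            /* The spinlock makes the read-modify-write of the flag word
             * atomic with respect to concurrent setters and clearers. */
            spin_lock(&ip->i_flags_lock);
            ip->i_flags |= flags;
            spin_unlock(&ip->i_flags_lock);
    }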
 
@@ -334,7 +345,7 @@ xfs_end_bio(
        bio->bi_end_io = NULL;
        bio_put(bio);
 
-       xfs_finish_ioend(ioend);
+       xfs_finish_ioend(ioend, 0);
        return 0;
 }
 
@@ -470,7 +481,7 @@ xfs_submit_ioend(
                }
                if (bio)
                        xfs_submit_ioend_bio(ioend, bio);
-               xfs_finish_ioend(ioend);
+               xfs_finish_ioend(ioend, 0);
        } while ((ioend = next) != NULL);
 }
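The xfs_finish_ioend(ioend, 0) at the bottom of the submit loop drops the ioend's initial "setup" hold. Each ioend starts with io_remaining at one and every bio in flight accounts for another hold, so the completion work cannot run while bios are still being attached. Reusing the sketch types from above (my_next_bio is hypothetical):

    #include <linux/fs.h>
    #include <linux/bio.h>

    extern struct bio *my_next_bio(struct my_ioend *ioend); /* hypothetical */

    static void my_submit_all(struct my_ioend *ioend)
    {
            struct bio *bio;

            atomic_set(&ioend->io_remaining, 1);    /* the setup hold */

            while ((bio = my_next_bio(ioend)) != NULL) {
                    atomic_inc(&ioend->io_remaining); /* one hold per bio */
                    submit_bio(WRITE, bio);  /* bio completion drops its hold */
            }

            /*
             * Drop the setup hold. If every bio has already completed,
             * this is the final hold and completion is queued right here;
             * otherwise the last bio's end_io callback queues it.
             */
            my_finish_ioend(ioend, 0);
    }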
 
@@ -645,7 +656,7 @@ xfs_probe_cluster(
 
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
-                       size_t pg_offset, len = 0;
+                       size_t pg_offset, pg_len = 0;
 
                        if (tindex == tlast) {
                                pg_offset =
@@ -658,16 +669,16 @@ xfs_probe_cluster(
                                pg_offset = PAGE_CACHE_SIZE;
 
                        if (page->index == tindex && !TestSetPageLocked(page)) {
-                               len = xfs_probe_page(page, pg_offset, mapped);
+                               pg_len = xfs_probe_page(page, pg_offset, mapped);
                                unlock_page(page);
                        }
 
-                       if (!len) {
+                       if (!pg_len) {
                                done = 1;
                                break;
                        }
 
-                       total += len;
+                       total += pg_len;
                        tindex++;
                }
 
@@ -701,7 +712,7 @@ xfs_is_delayed_page(
                        else if (buffer_delay(bh))
                                acceptable = (type == IOMAP_DELAY);
                        else if (buffer_dirty(bh) && buffer_mapped(bh))
-                               acceptable = (type == 0);
+                               acceptable = (type == IOMAP_NEW);
                        else
                                break;
                } while ((bh = bh->b_this_page) != head);
@@ -810,7 +821,7 @@ xfs_convert_page(
                        page_dirty--;
                        count++;
                } else {
-                       type = 0;
+                       type = IOMAP_NEW;
                        if (buffer_mapped(bh) && all_bh && startio) {
                                lock_buffer(bh);
                                xfs_add_to_ioend(inode, bh, offset,
@@ -968,8 +979,8 @@ xfs_page_state_convert(
 
        bh = head = page_buffers(page);
        offset = page_offset(page);
-       flags = -1;
-       type = IOMAP_READ;
+       flags = BMAPI_READ;
+       type = IOMAP_NEW;
 
        /* TODO: cleanup count and page_dirty */
 
@@ -999,14 +1010,16 @@ xfs_page_state_convert(
                 *
                 * Third case, an unmapped buffer was found, and we are
                 * in a path where we need to write the whole page out.
-                */
+                */
                if (buffer_unwritten(bh) || buffer_delay(bh) ||
                    ((buffer_uptodate(bh) || PageUptodate(page)) &&
                     !buffer_mapped(bh) && (unmapped || startio))) {
-                       /*
+                       int new_ioend = 0;
+
+                       /*
                         * Make sure we don't use a read-only iomap
                         */
-                       if (flags == BMAPI_READ)
+                       if (flags == BMAPI_READ)
                                iomap_valid = 0;
 
                        if (buffer_unwritten(bh)) {
@@ -1021,6 +1034,15 @@ xfs_page_state_convert(
                        }
 
                        if (!iomap_valid) {
+                               /*
+                                * if we didn't have a valid mapping then we
+                                * need to ensure that we put the new mapping
+                                * in a new ioend structure. This needs to be
+                                * done to ensure that the ioends correctly
+                                * reflect the block mappings at io completion
+                                * for unwritten extent conversion.
+                                */
+                               new_ioend = 1;
                                if (type == IOMAP_NEW) {
                                        size = xfs_probe_cluster(inode,
                                                        page, bh, head, 0);
@@ -1040,7 +1062,7 @@ xfs_page_state_convert(
                                if (startio) {
                                        xfs_add_to_ioend(inode, bh, offset,
                                                        type, &ioend,
-                                                       !iomap_valid);
+                                                       new_ioend);
                                } else {
                                        set_buffer_dirty(bh);
                                        unlock_buffer(bh);
@@ -1055,7 +1077,7 @@ xfs_page_state_convert(
                         * That means it must already have extents allocated
                         * underneath it. Map the extent by reading it.
                         */
-                       if (!iomap_valid || type != IOMAP_READ) {
+                       if (!iomap_valid || flags != BMAPI_READ) {
                                flags = BMAPI_READ;
                                size = xfs_probe_cluster(inode, page, bh,
                                                                head, 1);
@@ -1066,7 +1088,15 @@ xfs_page_state_convert(
                                iomap_valid = xfs_iomap_valid(&iomap, offset);
                        }
 
-                       type = IOMAP_READ;
+                       /*
+                        * We set the type to IOMAP_NEW in case we are doing a
+                        * small write at EOF that is extending the file but
+                        * without needing an allocation. We need to update the
+                        * file size on I/O completion in this case so it is
+                        * the same case as having just allocated a new extent
+                        * that we are writing into for the first time.
+                        */
+                       type = IOMAP_NEW;
                        if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
                                ASSERT(buffer_mapped(bh));
                                if (iomap_valid)
@@ -1216,10 +1246,7 @@ xfs_vm_writepages(
        struct address_space    *mapping,
        struct writeback_control *wbc)
 {
-       struct bhv_vnode        *vp = vn_from_inode(mapping->host);
-
-       if (VN_TRUNC(vp))
-               VUNTRUNCATE(vp);
+       xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
        return generic_writepages(mapping, wbc);
 }
 
@@ -1296,7 +1323,6 @@ __xfs_get_blocks(
        int                     direct,
        bmapi_flags_t           flags)
 {
-       bhv_vnode_t             *vp = vn_from_inode(inode);
        xfs_iomap_t             iomap;
        xfs_off_t               offset;
        ssize_t                 size;
@@ -1306,7 +1332,7 @@ __xfs_get_blocks(
        offset = (xfs_off_t)iblock << inode->i_blkbits;
        ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
        size = bh_result->b_size;
-       error = bhv_vop_bmap(vp, offset, size,
+       error = xfs_bmap(XFS_I(inode), offset, size,
                             create ? flags : BMAPI_READ, &iomap, &niomap);
        if (error)
                return -error;
@@ -1408,6 +1434,13 @@ xfs_end_io_direct(
         * This is not necessary for synchronous direct I/O, but we do
         * it anyway to keep the code uniform and simpler.
         *
+        * Well, if only it were that simple. Because synchronous direct I/O
+        * requires extent conversion to occur *before* we return to userspace,
+        * we have to wait for extent conversion to complete. Look at the
+        * iocb that has been passed to us to determine if this is AIO or
+        * not. If it is synchronous, tell xfs_finish_ioend() to kick the
+        * workqueue and wait for it to complete.
+        *
         * The core direct I/O code might be changed to always call the
         * completion handler in the future, in which case all this can
         * go away.
@@ -1415,9 +1448,9 @@ xfs_end_io_direct(
        ioend->io_offset = offset;
        ioend->io_size = size;
        if (ioend->io_type == IOMAP_READ) {
-               xfs_finish_ioend(ioend);
+               xfs_finish_ioend(ioend, 0);
        } else if (private && size > 0) {
-               xfs_finish_ioend(ioend);
+               xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
        } else {
                /*
                 * A direct I/O write ioend starts its life in unwritten
@@ -1426,7 +1459,7 @@ xfs_end_io_direct(
                 * handler.
                 */
                INIT_WORK(&ioend->io_work, xfs_end_bio_written);
-               xfs_finish_ioend(ioend);
+               xfs_finish_ioend(ioend, 0);
        }
 
        /*
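is_sync_kiocb() distinguishes a plain blocking read()/write() from AIO: a synchronous writer must not return to userspace until the unwritten-extent conversion is complete, which is exactly what the new wait flag provides. The dispatch above, restated with explanatory comments around the same hypothetical helper:

    #include <linux/fs.h>
    #include <linux/aio.h>

    /*
     * Simplified restatement of the dispatch above. Assumes the earlier
     * my_ioend sketch gained io_type/io_offset/io_size fields; the enum
     * stands in for the XFS IOMAP_* constants.
     */
    enum my_io_type { MY_IOMAP_READ, MY_IOMAP_WRITE };

    static void my_end_io_direct(struct kiocb *iocb, struct my_ioend *ioend,
                                 loff_t offset, ssize_t size, void *private)
    {
            ioend->io_offset = offset;
            ioend->io_size = size;

            if (ioend->io_type == MY_IOMAP_READ) {
                    my_finish_ioend(ioend, 0);  /* reads convert nothing */
            } else if (private && size > 0) {
                    /*
                     * Unwritten extents were written and need converting.
                     * AIO can return now and let the workqueue convert
                     * them later; a synchronous write must wait here so
                     * the conversion is done before the syscall returns.
                     */
                    my_finish_ioend(ioend, is_sync_kiocb(iocb));
            } else {
                    my_finish_ioend(ioend, 0);  /* nothing to convert */
            }
    }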
@@ -1447,13 +1480,13 @@ xfs_vm_direct_IO(
 {
        struct file     *file = iocb->ki_filp;
        struct inode    *inode = file->f_mapping->host;
-       bhv_vnode_t     *vp = vn_from_inode(inode);
        xfs_iomap_t     iomap;
        int             maps = 1;
        int             error;
        ssize_t         ret;
 
-       error = bhv_vop_bmap(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps);
+       error = xfs_bmap(XFS_I(inode), offset, 0,
+                               BMAPI_DEVICE, &iomap, &maps);
        if (error)
                return -error;
 
@@ -1494,12 +1527,13 @@ xfs_vm_bmap(
        sector_t                block)
 {
        struct inode            *inode = (struct inode *)mapping->host;
-       bhv_vnode_t             *vp = vn_from_inode(inode);
+       struct xfs_inode        *ip = XFS_I(inode);
 
-       vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
-       bhv_vop_rwlock(vp, VRWLOCK_READ);
-       bhv_vop_flush_pages(vp, (xfs_off_t)0, -1, 0, FI_REMAPF);
-       bhv_vop_rwunlock(vp, VRWLOCK_READ);
+       vn_trace_entry(vn_from_inode(inode), __FUNCTION__,
+                       (inst_t *)__return_address);
+       xfs_rwlock(ip, VRWLOCK_READ);
+       xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
+       xfs_rwunlock(ip, VRWLOCK_READ);
        return generic_block_bmap(mapping, block, xfs_get_blocks);
 }
 