fs: pass exact type of data dirties to ->dirty_inode
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index ad8e303c0d2997c396489d80b8b210fdad67a092..a5763e3505ba59cc330e2cecbcf54c5eb23d8410 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -639,8 +639,8 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
        while (target > 0) {
                count = target;
                /* allocating blocks for indirect blocks and direct blocks */
-               current_block = ext4_new_meta_blocks(handle, inode,
-                                                       goal, &count, err);
+               current_block = ext4_new_meta_blocks(handle, inode, goal,
+                                                    0, &count, err);
                if (*err)
                        goto failed_out;
 
@@ -1930,7 +1930,7 @@ repeat:
         * We do still charge estimated metadata to the sb though;
         * we cannot afford to run out of free blocks.
         */
-       if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
+       if (ext4_claim_free_blocks(sbi, md_needed + 1, 0)) {
                dquot_release_reservation_block(inode, 1);
                if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
                        yield();
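Both hunks above plumb a new allocation-flags argument through the block allocator: ext4_new_meta_blocks() and ext4_claim_free_blocks() each gain a flags word, with 0 preserving the old behaviour. A sketch of the widened prototypes as assumed here (the flag name is an assumption based on the companion allocate-from-reserve patches):

	/* flags == 0 means no special treatment; a flag such as
	 * EXT4_MB_USE_ROOT_BLOCKS (assumed name) would let privileged
	 * callers dip into the root-reserved blocks */
	ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
					  ext4_fsblk_t goal, unsigned int flags,
					  unsigned long *count, int *errp);
	int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
				   s64 nblocks, unsigned int flags);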
@@ -2502,6 +2502,7 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
                 * for partial write.
                 */
                set_buffer_new(bh);
+               set_buffer_mapped(bh);
        }
        return 0;
 }
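__block_write_begin() only invokes the filesystem's get_block callback for buffers that are not yet mapped, so additionally marking this buffer mapped presumably avoids a redundant second lookup on the same page; set_buffer_new() keeps the zero-fill behaviour for partial writes. A condensed sketch of the relevant check in fs/buffer.c:

	/* __block_write_begin(), abridged */
	if (!buffer_mapped(bh)) {
		err = get_block(inode, block, bh, 1);
		if (err)
			break;
	}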
@@ -2795,9 +2796,7 @@ static int write_cache_pages_da(struct address_space *mapping,
                                continue;
                        }
 
-                       if (PageWriteback(page))
-                               wait_on_page_writeback(page);
-
+                       wait_on_page_writeback(page);
                        BUG_ON(PageWriteback(page));
 
                        if (mpd->next_page != page->index)
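The dropped PageWriteback() test was redundant: wait_on_page_writeback() makes the same check internally before sleeping, so the unconditional call is equivalent. Its include/linux/pagemap.h definition in kernels of this vintage is roughly:

	/* wait for any in-flight writeback of this page to finish */
	static inline void wait_on_page_writeback(struct page *page)
	{
		if (PageWriteback(page))
			wait_on_page_bit(page, PG_writeback);
	}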
@@ -3512,7 +3511,7 @@ retry:
                        loff_t end = offset + iov_length(iov, nr_segs);
 
                        if (end > isize)
-                               vmtruncate(inode, isize);
+                               ext4_truncate_failed_write(inode);
                }
        }
        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
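vmtruncate() is being phased out of the VFS; the helper used instead also drops any page cache pages instantiated beyond i_size by the failed direct-IO write before freeing blocks. As a sketch (the helper is defined earlier in fs/ext4/inode.c):

	static void ext4_truncate_failed_write(struct inode *inode)
	{
		/* drop pages the failed write instantiated past i_size */
		truncate_inode_pages(inode->i_mapping, inode->i_size);
		ext4_truncate(inode);
	}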
@@ -3914,10 +3913,31 @@ void ext4_set_aops(struct inode *inode)
  */
 int ext4_block_truncate_page(handle_t *handle,
                struct address_space *mapping, loff_t from)
+{
+       unsigned offset = from & (PAGE_CACHE_SIZE-1);
+       unsigned length;
+       unsigned blocksize;
+       struct inode *inode = mapping->host;
+
+       blocksize = inode->i_sb->s_blocksize;
+       length = blocksize - (offset & (blocksize - 1));
+
+       return ext4_block_zero_page_range(handle, mapping, from, length);
+}
+
+/*
+ * ext4_block_zero_page_range() zeros out a mapping of length 'length'
+ * starting from file offset 'from'.  The range to be zeroed must
+ * be contained within one block.  If the specified range exceeds
+ * the end of the block, it will be shortened to the end of the
+ * block that corresponds to 'from'.
+ */
+int ext4_block_zero_page_range(handle_t *handle,
+               struct address_space *mapping, loff_t from, loff_t length)
 {
        ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
        unsigned offset = from & (PAGE_CACHE_SIZE-1);
-       unsigned blocksize, length, pos;
+       unsigned blocksize, max, pos;
        ext4_lblk_t iblock;
        struct inode *inode = mapping->host;
        struct buffer_head *bh;
@@ -3930,7 +3950,15 @@ int ext4_block_truncate_page(handle_t *handle,
                return -EINVAL;
 
        blocksize = inode->i_sb->s_blocksize;
-       length = blocksize - (offset & (blocksize - 1));
+       max = blocksize - (offset & (blocksize - 1));
+
+       /*
+        * Correct the length if it does not fall between
+        * 'from' and the end of the block.
+        */
+       if (length > max || length < 0)
+               length = max;
+
        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
 
        if (!page_has_buffers(page))
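Splitting ext4_block_zero_page_range() out of ext4_block_truncate_page() serves hole punching, which must zero the partial blocks at both ends of the punched range rather than just the tail of the last block. A minimal sketch of such a caller (variable names are illustrative):

	struct super_block *sb = inode->i_sb;
	loff_t first = round_up(offset, sb->s_blocksize);
	loff_t last = round_down(offset + length, sb->s_blocksize);
	int err = 0;

	/* zero the tail of the first partial block, if any */
	if (first > offset)
		err = ext4_block_zero_page_range(handle, mapping,
						 offset, first - offset);
	/* zero the head of the last partial block, if any */
	if (!err && last < offset + length)
		err = ext4_block_zero_page_range(handle, mapping,
						 last, offset + length - last);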
@@ -4379,8 +4407,6 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
 
 int ext4_can_truncate(struct inode *inode)
 {
-       if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
-               return 0;
        if (S_ISREG(inode->i_mode))
                return 1;
        if (S_ISDIR(inode->i_mode))
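Dropping the IS_APPEND()/IS_IMMUTABLE() checks here is safe because the VFS callers already reject size changes on append-only and immutable inodes (for example do_sys_truncate() in fs/open.c) before the filesystem is ever asked to truncate.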
@@ -4390,6 +4416,31 @@ int ext4_can_truncate(struct inode *inode)
        return 0;
 }
 
+/*
+ * ext4_punch_hole: punches a hole in a file by releasing the blocks
+ * associated with the given offset and length
+ *
+ * @inode:  File inode
+ * @offset: The offset where the hole will begin
+ * @len:    The length of the hole
+ *
+ * Returns: 0 on success or negative on failure
+ */
+
+int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+{
+       struct inode *inode = file->f_path.dentry->d_inode;
+       if (!S_ISREG(inode->i_mode))
+               return -ENOTSUPP;
+
+       if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+               /* TODO: Add support for non extent hole punching */
+               return -ENOTSUPP;
+       }
+
+       return ext4_ext_punch_hole(file, offset, length);
+}
+
 /*
  * ext4_truncate()
  *
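ext4_punch_hole() is presumably reached from ext4_fallocate() when userspace passes FALLOC_FL_PUNCH_HOLE; a hedged sketch of that dispatch (ext4_do_fallocate is a hypothetical stand-in for the elided preallocation path):

	long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
	{
		/* reject mode combinations this sketch does not model */
		if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
			return -EOPNOTSUPP;

		if (mode & FALLOC_FL_PUNCH_HOLE)
			return ext4_punch_hole(file, offset, len);

		return ext4_do_fallocate(file, mode, offset, len);
	}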
@@ -4429,8 +4480,8 @@ void ext4_truncate(struct inode *inode)
        Indirect chain[4];
        Indirect *partial;
        __le32 nr = 0;
-       int n;
-       ext4_lblk_t last_block;
+       int n = 0;
+       ext4_lblk_t last_block, max_block;
        unsigned blocksize = inode->i_sb->s_blocksize;
 
        trace_ext4_truncate_enter(inode);
@@ -4455,14 +4506,18 @@ void ext4_truncate(struct inode *inode)
 
        last_block = (inode->i_size + blocksize-1)
                                        >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
+       max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
+                                       >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
 
        if (inode->i_size & (blocksize - 1))
                if (ext4_block_truncate_page(handle, mapping, inode->i_size))
                        goto out_stop;
 
-       n = ext4_block_to_path(inode, last_block, offsets, NULL);
-       if (n == 0)
-               goto out_stop;  /* error */
+       if (last_block != max_block) {
+               n = ext4_block_to_path(inode, last_block, offsets, NULL);
+               if (n == 0)
+                       goto out_stop;  /* error */
+       }
 
        /*
         * OK.  This truncate is going to happen.  We add the inode to the
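The max_block comparison handles a case that is now legal: truncate() can extend a block-mapped file up to the filesystem's maximum file size, and when i_size sits exactly at s_bitmap_maxbytes, last_block lands one past the last block addressable through the indirect tree. ext4_block_to_path() would (correctly) refuse such a block, so the lookup is skipped; there are no data blocks out there to free.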
@@ -4493,7 +4548,13 @@ void ext4_truncate(struct inode *inode)
         */
        ei->i_disksize = inode->i_size;
 
-       if (n == 1) {           /* direct blocks */
+       if (last_block == max_block) {
+               /*
+                * It is unnecessary to free any data blocks if last_block is
+                * equal to the indirect block limit.
+                */
+               goto out_unlock;
+       } else if (n == 1) {            /* direct blocks */
                ext4_free_data(handle, inode, NULL, i_data+offsets[0],
                               i_data + EXT4_NDIR_BLOCKS);
                goto do_indirects;
@@ -4553,6 +4614,7 @@ do_indirects:
                ;
        }
 
+out_unlock:
        up_write(&ei->i_data_sem);
        inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
        ext4_mark_inode_dirty(handle, inode);
@@ -4605,7 +4667,7 @@ static int __ext4_get_inode_loc(struct inode *inode,
        /*
         * Figure out the offset within the block group inode table
         */
-       inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb));
+       inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
        inode_offset = ((inode->i_ino - 1) %
                        EXT4_INODES_PER_GROUP(sb));
        block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
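s_inodes_per_block is the same quantity, precomputed once at mount time instead of re-derived with a division on every inode lookup; from ext4_fill_super() in fs/ext4/super.c, roughly:

	sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0)
		goto cantfind_ext4;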
@@ -5299,8 +5361,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 
        if (S_ISREG(inode->i_mode) &&
            attr->ia_valid & ATTR_SIZE &&
-           (attr->ia_size < inode->i_size ||
-            (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))) {
+           (attr->ia_size < inode->i_size)) {
                handle_t *handle;
 
                handle = ext4_journal_start(inode, 3);
@@ -5334,14 +5395,15 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
                                goto err_out;
                        }
                }
-               /* ext4_truncate will clear the flag */
-               if ((ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))
-                       ext4_truncate(inode);
        }
 
-       if ((attr->ia_valid & ATTR_SIZE) &&
-           attr->ia_size != i_size_read(inode))
-               rc = vmtruncate(inode, attr->ia_size);
+       if (attr->ia_valid & ATTR_SIZE) {
+               if (attr->ia_size != i_size_read(inode)) {
+                       truncate_setsize(inode, attr->ia_size);
+                       ext4_truncate(inode);
+               } else if (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
+                       ext4_truncate(inode);
+       }
 
        if (!rc) {
                setattr_copy(inode, attr);
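truncate_setsize() is the in-tree replacement for vmtruncate(): it publishes the new i_size and prunes the page cache, leaving on-disk block freeing to the filesystem's own ext4_truncate() call that follows. Its mm/truncate.c definition in this era is roughly:

	void truncate_setsize(struct inode *inode, loff_t newsize)
	{
		loff_t oldsize = inode->i_size;

		i_size_write(inode, newsize);
		truncate_pagecache(inode, oldsize, newsize);
	}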
@@ -5398,13 +5460,12 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
        /* if nrblocks are contiguous */
        if (chunk) {
                /*
-                * With N contiguous data blocks, it need at most
-                * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks
-                * 2 dindirect blocks
-                * 1 tindirect block
+                * With N contiguous data blocks, we need at most
+                * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
+                * 2 dindirect blocks, and 1 tindirect block
                 */
-               indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
-               return indirects + 3;
+               return DIV_ROUND_UP(nrblocks,
+                                   EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
        }
        /*
         * if nrblocks are not contiguous, worse case, each block touch
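A quick sanity check of the new bound: with 4KiB blocks, EXT4_ADDR_PER_BLOCK is 1024, so a contiguous 2048-block write is charged DIV_ROUND_UP(2048, 1024) + 4 = 6 metadata blocks; the +4 covers one extra indirect block for a run that straddles an indirect-block boundary, two dindirect blocks, and one tindirect block. The old code both truncated the division and omitted the straddling allowance.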
@@ -5672,7 +5733,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
  * so would cause a commit on atime updates, which we don't bother doing.
  * We handle synchronous inodes at the highest possible level.
  */
-void ext4_dirty_inode(struct inode *inode)
+void ext4_dirty_inode(struct inode *inode, int flags)
 {
        handle_t *handle;
 
@@ -5800,15 +5861,19 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
                goto out_unlock;
        }
        ret = 0;
-       if (PageMappedToDisk(page))
-               goto out_unlock;
+
+       lock_page(page);
+       wait_on_page_writeback(page);
+       if (PageMappedToDisk(page)) {
+               up_read(&inode->i_alloc_sem);
+               return VM_FAULT_LOCKED;
+       }
 
        if (page->index == size >> PAGE_CACHE_SHIFT)
                len = size & ~PAGE_CACHE_MASK;
        else
                len = PAGE_CACHE_SIZE;
 
-       lock_page(page);
        /*
         * return if we have all the buffers mapped. This avoid
         * the need to call write_begin/write_end which does a
@@ -5818,8 +5883,8 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (page_has_buffers(page)) {
                if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
                                        ext4_bh_unmapped)) {
-                       unlock_page(page);
-                       goto out_unlock;
+                       up_read(&inode->i_alloc_sem);
+                       return VM_FAULT_LOCKED;
                }
        }
        unlock_page(page);
@@ -5839,6 +5904,16 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (ret < 0)
                goto out_unlock;
        ret = 0;
+
+       /*
+        * write_begin/end might have created a dirty page and someone
+        * could wander in and start the IO.  Make sure that hasn't
+        * happened.
+        */
+       lock_page(page);
+       wait_on_page_writeback(page);
+       up_read(&inode->i_alloc_sem);
+       return VM_FAULT_LOCKED;
 out_unlock:
        if (ret)
                ret = VM_FAULT_SIGBUS;
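Returning VM_FAULT_LOCKED with the page still locked (and writeback drained) closes a race: previously the page could be written back, and thus need remapping, between page_mkwrite() returning and the fault completing. The mm side honours the contract roughly like this (abridged and simplified from the do_wp_page()-era handling; control flow is illustrative):

	tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
	if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
		/* fs returned the page unlocked: re-take the lock and
		 * re-validate, since truncate may have raced with us */
		lock_page(page);
		if (!page->mapping) {
			unlock_page(page);
			return 0;	/* illustrative: retry the fault */
		}
	} else
		VM_BUG_ON(!PageLocked(page));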