Btrfs: Remove debugging statements from the invalidatepage calls
[deliverable/linux.git] / fs / btrfs / inode.c
index 913ab128eee1b6039e9a9ba8f0e548d968af8e5f..5b1514e58de4ade6757cbfbc746c3b266670f94b 100644 (file)
@@ -16,6 +16,7 @@
  * Boston, MA 021110-1307, USA.
  */
 
+#include <linux/bio.h>
 #include <linux/buffer_head.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
@@ -39,6 +40,7 @@
 #include "btrfs_inode.h"
 #include "ioctl.h"
 #include "print-tree.h"
+#include "volumes.h"
 
 struct btrfs_iget_args {
        u64 ino;
@@ -120,9 +122,12 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end)
        if (alloc_hint == EXTENT_MAP_INLINE)
                goto out;
 
+       BUG_ON(num_bytes > btrfs_super_total_bytes(&root->fs_info->super_copy));
+
        while(num_bytes > 0) {
                cur_alloc_size = min(num_bytes, root->fs_info->max_extent);
                ret = btrfs_alloc_extent(trans, root, cur_alloc_size,
+                                        root->sectorsize,
                                         root->root_key.objectid,
                                         trans->transid,
                                         inode->i_ino, start, 0,
@@ -131,11 +136,17 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end)
                        WARN_ON(1);
                        goto out;
                }
+               cur_alloc_size = ins.offset;
                ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
                                               start, ins.objectid, ins.offset,
                                               ins.offset);
                inode->i_blocks += ins.offset >> 9;
                btrfs_check_file(root, inode);
+               if (num_bytes < cur_alloc_size) {
+                       printk("num_bytes %Lu cur_alloc %Lu\n", num_bytes,
+                              cur_alloc_size);
+                       break;
+               }
                num_bytes -= cur_alloc_size;
                alloc_hint = ins.objectid + ins.offset;
                start += cur_alloc_size;
@@ -294,29 +305,80 @@ int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
        return 0;
 }
 
-int btrfs_writepage_io_hook(struct page *page, u64 start, u64 end)
+/*
+ * extent_io merge_bio_hook: called before another page is added to a
+ * bio, to decide whether the enlarged bio would still map onto one
+ * contiguous chunk on disk.
+ *
+ * Returns 1 when the bio must be submitted as-is because adding @size
+ * more bytes would cross a chunk/stripe boundary, 0 when the new page
+ * can safely be merged in.
+ */
+int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
+                        size_t size, struct bio *bio)
+{
+       struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
+       struct btrfs_mapping_tree *map_tree;
+       /* NOTE(review): bi_sector << 9 shifts before widening; this relies
+        * on sector_t being 64 bits -- confirm a (u64) cast is not needed */
+       u64 logical = bio->bi_sector << 9;
+       u64 length = 0;
+       u64 map_length;
+       struct bio_vec *bvec;
+       int i;
+       int ret;
+
+       /* total payload already queued in this bio */
+       bio_for_each_segment(bvec, bio, i) {
+               length += bvec->bv_len;
+       }
+       map_tree = &root->fs_info->mapping_tree;
+       map_length = length;
+       /* ask the chunk mapper how far the contiguous device extent runs;
+        * NOTE(review): ret from btrfs_map_block() is never checked here */
+       ret = btrfs_map_block(map_tree, READ, logical,
+                             &map_length, NULL, 0);
+
+       if (map_length < length + size) {
+               return 1;
+       }
+       return 0;
+}
+
+/*
+ * Checksum a data write bio and hand it to the device layer.  Runs in
+ * a helper thread queued by btrfs_submit_bio_hook(), so the csum work
+ * and the transaction do not block the original submitter.
+ */
+int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
+                         int mirror_num)
 {
-       struct inode *inode = page->mapping->host;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
-       char *kaddr;
        int ret = 0;
-       u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
-       size_t offset = start - page_start;
-       if (btrfs_test_opt(root, NODATASUM) ||
-           btrfs_test_flag(inode, NODATASUM))
-               return 0;
+       char *sums = NULL;
+
+       /* compute csums for every segment of the bio into sums */
+       ret = btrfs_csum_one_bio(root, bio, &sums);
+       BUG_ON(ret);
+
+       /* record the csums in the csum tree inside a transaction */
        mutex_lock(&root->fs_info->fs_mutex);
        trans = btrfs_start_transaction(root, 1);
+
        btrfs_set_trans_block_group(trans, inode);
-       kaddr = kmap(page);
-       btrfs_csum_file_block(trans, root, inode, inode->i_ino,
-                             start, kaddr + offset, end - start + 1);
-       kunmap(page);
+       btrfs_csum_file_blocks(trans, root, inode, bio, sums);
+
        ret = btrfs_end_transaction(trans, root);
        BUG_ON(ret);
        mutex_unlock(&root->fs_info->fs_mutex);
-       return ret;
+
+       kfree(sums);
+
+       /* finally map the bio onto the actual devices and submit it */
+       return btrfs_map_bio(root, rw, bio, mirror_num);
+}
+
+/*
+ * extent_io submit_bio_hook for data inodes: route a bio to the right
+ * submission path.
+ *
+ * Reads get a workqueue end_io helper attached (so csum verification
+ * runs in process context) and are mapped immediately.  Writes are
+ * queued to a helper thread that checksums them first, unless the
+ * mount or inode disables data csums, in which case they map directly.
+ */
+int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
+                         int mirror_num)
+{
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       int ret = 0;
+
+       /* reads: attach the end_io helper, then map straight through */
+       if (!(rw & (1 << BIO_RW))) {
+               ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
+               BUG_ON(ret);
+               goto mapit;
+       }
+
+       /* csums disabled: the write can go straight to the devices */
+       if (btrfs_test_opt(root, NODATASUM) ||
+           btrfs_test_flag(inode, NODATASUM)) {
+               goto mapit;
+       }
+
+       /* data write with csums: checksum asynchronously, then map */
+       return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
+                                  inode, rw, bio, mirror_num,
+                                  __btrfs_submit_bio_hook);
+mapit:
+       return btrfs_map_bio(root, rw, bio, mirror_num);
+}
 
 int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end)
@@ -328,9 +390,11 @@ int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end)
        struct btrfs_csum_item *item;
        struct btrfs_path *path = NULL;
        u32 csum;
+
        if (btrfs_test_opt(root, NODATASUM) ||
            btrfs_test_flag(inode, NODATASUM))
                return 0;
+
        mutex_lock(&root->fs_info->fs_mutex);
        path = btrfs_alloc_path();
        item = btrfs_lookup_csum(NULL, root, path, inode->i_ino, start, 0);
@@ -353,6 +417,92 @@ out:
        return ret;
 }
 
+/*
+ * Book-keeping for a failed read so later retries can walk the
+ * remaining mirrors.  The record pointer is stored (cast to u64) as
+ * the private value of the per-inode io_failure_tree, keyed by the
+ * file offset of the failed range.
+ */
+struct io_failure_record {
+       struct page *page;      /* page the failed range belongs to */
+       u64 start;              /* file offset of the failed range */
+       u64 len;                /* length of the failed range in bytes */
+       u64 logical;            /* logical (pre-chunk-map) disk address */
+       int last_mirror;        /* last mirror number already tried */
+};
+
+/*
+ * extent_io read-failure hook.  A read of [start, end] in @page failed
+ * (bad IO or csum mismatch).  Record the failure in the per-inode
+ * io_failure_tree and resubmit the page against the next mirror; once
+ * every copy has been tried, drop the record and return -EIO.
+ */
+int btrfs_readpage_io_failed_hook(struct bio *failed_bio,
+                                 struct page *page, u64 start, u64 end,
+                                 struct extent_state *state)
+{
+       struct io_failure_record *failrec = NULL;
+       u64 private;
+       struct extent_map *em;
+       struct inode *inode = page->mapping->host;
+       struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
+       struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+       struct bio *bio;
+       int num_copies;
+       int ret;
+       u64 logical;
+
+       ret = get_state_private(failure_tree, start, &private);
+       if (ret) {
+               /* first failure for this range: build a new record */
+               failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
+               if (!failrec)
+                       return -ENOMEM;
+               failrec->start = start;
+               failrec->len = end - start + 1;
+               failrec->last_mirror = 0;
+
+               spin_lock(&em_tree->lock);
+               em = lookup_extent_mapping(em_tree, start, failrec->len);
+               /*
+                * lookup_extent_mapping() may return NULL; only sanity
+                * check the range of a mapping we actually got back
+                * (dereferencing em before the NULL check crashed here).
+                */
+               if (em && (em->start > start || em->start + em->len < start)) {
+                       free_extent_map(em);
+                       em = NULL;
+               }
+               spin_unlock(&em_tree->lock);
+
+               if (!em || IS_ERR(em)) {
+                       kfree(failrec);
+                       return -EIO;
+               }
+               /* translate the file offset into a logical disk address */
+               logical = start - em->start;
+               logical = em->block_start + logical;
+               failrec->logical = logical;
+               free_extent_map(em);
+               set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
+                               EXTENT_DIRTY, GFP_NOFS);
+               /* stash the record pointer as the state's private value */
+               set_state_private(failure_tree, start,
+                                (u64)(unsigned long)failrec);
+       } else {
+               failrec = (struct io_failure_record *)(unsigned long)private;
+       }
+       num_copies = btrfs_num_copies(
+                             &BTRFS_I(inode)->root->fs_info->mapping_tree,
+                             failrec->logical, failrec->len);
+       failrec->last_mirror++;
+       if (!state) {
+               /* find the locked extent_state covering this range so the
+                * retry bio completes against the right state */
+               spin_lock_irq(&BTRFS_I(inode)->io_tree.lock);
+               state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
+                                                   failrec->start,
+                                                   EXTENT_LOCKED);
+               if (state && state->start != failrec->start)
+                       state = NULL;
+               spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock);
+       }
+       if (!state || failrec->last_mirror > num_copies) {
+               /* out of mirrors (or no state to retry against): give up */
+               set_state_private(failure_tree, failrec->start, 0);
+               clear_extent_bits(failure_tree, failrec->start,
+                                 failrec->start + failrec->len - 1,
+                                 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
+               kfree(failrec);
+               return -EIO;
+       }
+       /* resubmit the page against the next mirror */
+       bio = bio_alloc(GFP_NOFS, 1);
+       bio->bi_private = state;
+       bio->bi_end_io = failed_bio->bi_end_io;
+       bio->bi_sector = failrec->logical >> 9;
+       bio->bi_bdev = failed_bio->bi_bdev;
+       bio_add_page(bio, page, failrec->len, start - page_offset(page));
+       btrfs_submit_bio_hook(inode, READ, bio, failrec->last_mirror);
+       return 0;
+}
+
 int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                               struct extent_state *state)
 {
@@ -387,6 +537,30 @@ int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
        }
        kunmap_atomic(kaddr, KM_IRQ0);
        local_irq_restore(flags);
+
+       /* if the io failure tree for this inode is non-empty,
+        * check to see if we've recovered from a failed IO
+        */
+       private = 0;
+       if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
+                            (u64)-1, 1, EXTENT_DIRTY)) {
+               u64 private_failure;
+               struct io_failure_record *failure;
+               ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
+                                       start, &private_failure);
+               if (ret == 0) {
+                       failure = (struct io_failure_record *)(unsigned long)
+                                  private_failure;
+                       set_state_private(&BTRFS_I(inode)->io_failure_tree,
+                                         failure->start, 0);
+                       clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
+                                         failure->start,
+                                         failure->start + failure->len - 1,
+                                         EXTENT_DIRTY | EXTENT_LOCKED,
+                                         GFP_NOFS);
+                       kfree(failure);
+               }
+       }
        return 0;
 
 zeroit:
@@ -397,7 +571,9 @@ zeroit:
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_IRQ0);
        local_irq_restore(flags);
-       return 0;
+       if (private == 0)
+               return 0;
+       return -EIO;
 }
 
 void btrfs_read_locked_inode(struct inode *inode)
@@ -405,7 +581,7 @@ void btrfs_read_locked_inode(struct inode *inode)
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_inode_item *inode_item;
-       struct btrfs_inode_timespec *tspec;
+       struct btrfs_timespec *tspec;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_key location;
        u64 alloc_group_block;
@@ -454,7 +630,8 @@ void btrfs_read_locked_inode(struct inode *inode)
        BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
        if (!BTRFS_I(inode)->block_group) {
                BTRFS_I(inode)->block_group = btrfs_find_block_group(root,
-                                                        NULL, 0, 0, 0);
+                                                NULL, 0,
+                                                BTRFS_BLOCK_GROUP_METADATA, 0);
        }
        btrfs_free_path(path);
        inode_item = NULL;
@@ -464,6 +641,7 @@ void btrfs_read_locked_inode(struct inode *inode)
        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
                inode->i_mapping->a_ops = &btrfs_aops;
+               inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
                BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
                inode->i_fop = &btrfs_file_operations;
                inode->i_op = &btrfs_file_inode_operations;
@@ -478,6 +656,7 @@ void btrfs_read_locked_inode(struct inode *inode)
        case S_IFLNK:
                inode->i_op = &btrfs_symlink_inode_operations;
                inode->i_mapping->a_ops = &btrfs_symlink_aops;
+               inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
                break;
        default:
                init_special_inode(inode, inode->i_mode, rdev);
@@ -743,8 +922,9 @@ static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans,
        int pending_del_nr = 0;
        int pending_del_slot = 0;
        int extent_type = -1;
+       u64 mask = root->sectorsize - 1;
 
-       btrfs_drop_extent_cache(inode, inode->i_size, (u64)-1);
+       btrfs_drop_extent_cache(inode, inode->i_size & (~mask), (u64)-1);
        path = btrfs_alloc_path();
        path->reada = -1;
        BUG_ON(!path);
@@ -1047,7 +1227,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
                                                       hole_start, 0, 0,
                                                       hole_size);
                        btrfs_drop_extent_cache(inode, hole_start,
-                                               hole_size - 1);
+                                               (u64)-1);
                        btrfs_check_file(root, inode);
                }
                btrfs_end_transaction(trans, root);
@@ -1236,6 +1416,8 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p)
        extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
        extent_io_tree_init(&BTRFS_I(inode)->io_tree,
                             inode->i_mapping, GFP_NOFS);
+       extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
+                            inode->i_mapping, GFP_NOFS);
        return 0;
 }
 
@@ -1465,7 +1647,10 @@ read_dir_items:
                        di = (struct btrfs_dir_item *)((char *)di + di_len);
                }
        }
-       filp->f_pos = INT_LIMIT(typeof(filp->f_pos));
+       if (key_type == BTRFS_DIR_INDEX_KEY)
+               filp->f_pos = INT_LIMIT(typeof(filp->f_pos));
+       else
+               filp->f_pos++;
 nopos:
        ret = 0;
 err:
@@ -1520,6 +1705,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
 {
        struct inode *inode;
        struct btrfs_inode_item *inode_item;
+       struct btrfs_block_group_cache *new_inode_group;
        struct btrfs_key *location;
        struct btrfs_path *path;
        struct btrfs_inode_ref *ref;
@@ -1539,6 +1725,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
        extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
        extent_io_tree_init(&BTRFS_I(inode)->io_tree,
                             inode->i_mapping, GFP_NOFS);
+       extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
+                            inode->i_mapping, GFP_NOFS);
        BTRFS_I(inode)->delalloc_bytes = 0;
        BTRFS_I(inode)->root = root;
 
@@ -1546,8 +1734,13 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                owner = 0;
        else
                owner = 1;
-       group = btrfs_find_block_group(root, group, 0, 0, owner);
-       BTRFS_I(inode)->block_group = group;
+       new_inode_group = btrfs_find_block_group(root, group, 0,
+                                      BTRFS_BLOCK_GROUP_METADATA, owner);
+       if (!new_inode_group) {
+               printk("find_block group failed\n");
+               new_inode_group = group;
+       }
+       BTRFS_I(inode)->block_group = new_inode_group;
        BTRFS_I(inode)->flags = 0;
 
        key[0].objectid = objectid;
@@ -1753,11 +1946,14 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
                drop_inode = 1;
        else {
                inode->i_mapping->a_ops = &btrfs_aops;
+               inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
                inode->i_fop = &btrfs_file_operations;
                inode->i_op = &btrfs_file_inode_operations;
                extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
                extent_io_tree_init(&BTRFS_I(inode)->io_tree,
                                     inode->i_mapping, GFP_NOFS);
+               extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
+                                    inode->i_mapping, GFP_NOFS);
                BTRFS_I(inode)->delalloc_bytes = 0;
                BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
        }
@@ -1902,6 +2098,68 @@ out_unlock:
        return err;
 }
 
+/*
+ * Fold a freshly built extent_map @em into @existing, an overlapping
+ * mapping already present in @em_tree.  On success, @em is grown to
+ * cover the union of both ranges and replaces @existing in the tree;
+ * if the two mappings disagree about where the blocks live, the merge
+ * is refused with -EIO and a diagnostic printk.
+ */
+static int merge_extent_mapping(struct extent_map_tree *em_tree,
+                               struct extent_map *existing,
+                               struct extent_map *em)
+{
+       u64 start_diff;
+       u64 new_end;
+       int ret = 0;
+       /* true when existing maps real disk blocks (not hole/inline/etc) */
+       int real_blocks = existing->block_start < EXTENT_MAP_LAST_BYTE;
+
+       /* both must be real, or both must share the same special value */
+       if (real_blocks && em->block_start >= EXTENT_MAP_LAST_BYTE)
+               goto invalid;
+
+       if (!real_blocks && em->block_start != existing->block_start)
+               goto invalid;
+
+       new_end = max(existing->start + existing->len, em->start + em->len);
+
+       if (existing->start >= em->start) {
+               /* em starts first: it must reach existing, and the disk
+                * blocks must line up at the overlap */
+               if (em->start + em->len < existing->start)
+                       goto invalid;
+
+               start_diff = existing->start - em->start;
+               if (real_blocks && em->block_start + start_diff !=
+                   existing->block_start)
+                       goto invalid;
+
+               em->len = new_end - em->start;
+
+               remove_extent_mapping(em_tree, existing);
+               /* free for the tree */
+               free_extent_map(existing);
+               ret = add_extent_mapping(em_tree, em);
+
+       } else if (em->start > existing->start) {
+
+               /* existing starts first: extend em backwards over it */
+               if (existing->start + existing->len < em->start)
+                       goto invalid;
+
+               start_diff = em->start - existing->start;
+               if (real_blocks && existing->block_start + start_diff !=
+                   em->block_start)
+                       goto invalid;
+
+               remove_extent_mapping(em_tree, existing);
+               em->block_start = existing->block_start;
+               em->start = existing->start;
+               em->len = new_end - existing->start;
+               free_extent_map(existing);
+
+               ret = add_extent_mapping(em_tree, em);
+       } else {
+               /* NOTE(review): the two branches above cover every start
+                * ordering, so this arm looks unreachable -- confirm */
+               goto invalid;
+       }
+       return ret;
+
+invalid:
+       printk("invalid extent map merge [%Lu %Lu %Lu] [%Lu %Lu %Lu]\n",
+              existing->start, existing->len, existing->block_start,
+              em->start, em->len, em->block_start);
+       return -EIO;
+}
+
 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
                                    size_t pg_offset, u64 start, u64 len,
                                    int create)
@@ -2086,12 +2344,35 @@ insert:
        err = 0;
        spin_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em);
+
+       /* it is possible that someone inserted the extent into the tree
+        * while we had the lock dropped.  It is also possible that
+        * an overlapping map exists in the tree
+        */
        if (ret == -EEXIST) {
-               free_extent_map(em);
-               em = lookup_extent_mapping(em_tree, start, len);
-               if (!em) {
-                       err = -EIO;
-                       printk("failing to insert %Lu %Lu\n", start, len);
+               struct extent_map *existing;
+               existing = lookup_extent_mapping(em_tree, start, len);
+               if (!existing) {
+                       existing = lookup_extent_mapping(em_tree, em->start,
+                                                        em->len);
+                       if (existing) {
+                               err = merge_extent_mapping(em_tree, existing,
+                                                          em);
+                               free_extent_map(existing);
+                               if (err) {
+                                       free_extent_map(em);
+                                       em = NULL;
+                               }
+                       } else {
+                               err = -EIO;
+                               printk("failing to insert %Lu %Lu\n",
+                                      start, len);
+                               free_extent_map(em);
+                               em = NULL;
+                       }
+               } else {
+                       free_extent_map(em);
+                       em = existing;
                }
        }
        spin_unlock(&em_tree->lock);
@@ -2111,6 +2392,70 @@ out:
        return em;
 }
 
+/*
+ * get_block callback for btrfs_direct_IO (reads only): translate a
+ * file block into a physical device block by walking the extent tree
+ * and the chunk/stripe mapper.  Holes and delalloc ranges are left
+ * unmapped; inline extents cannot be read via O_DIRECT and fail with
+ * -EINVAL.
+ */
+static int btrfs_get_block(struct inode *inode, sector_t iblock,
+                       struct buffer_head *bh_result, int create)
+{
+       struct extent_map *em;
+       u64 start = (u64)iblock << inode->i_blkbits;
+       struct btrfs_multi_bio *multi = NULL;
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       u64 len;
+       u64 logical;
+       u64 map_length;
+       int ret = 0;
+
+       em = btrfs_get_extent(inode, NULL, 0, start, bh_result->b_size, 0);
+
+       /* NOTE(review): on error em is an ERR_PTR and still reaches
+        * free_extent_map() at out: below -- confirm that is safe */
+       if (!em || IS_ERR(em))
+               goto out;
+
+       if (em->start > start || em->start + em->len <= start)
+           goto out;
+
+       if (em->block_start == EXTENT_MAP_INLINE) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* holes and not-yet-allocated ranges stay unmapped */
+       if (em->block_start == EXTENT_MAP_HOLE ||
+           em->block_start == EXTENT_MAP_DELALLOC) {
+               goto out;
+       }
+
+       /* clamp the mapped length to the extent and to b_size's range */
+       len = em->start + em->len - start;
+       len = min_t(u64, len, INT_LIMIT(typeof(bh_result->b_size)));
+
+       logical = start - em->start;
+       logical = em->block_start + logical;
+
+       map_length = len;
+       ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
+                             logical, &map_length, &multi, 0);
+       BUG_ON(ret);
+       /* direct IO always reads from the first stripe/mirror */
+       bh_result->b_blocknr = multi->stripes[0].physical >> inode->i_blkbits;
+       bh_result->b_size = min(map_length, len);
+       bh_result->b_bdev = multi->stripes[0].dev->bdev;
+       set_buffer_mapped(bh_result);
+       kfree(multi);
+out:
+       free_extent_map(em);
+       return ret;
+}
+
+/*
+ * address_space direct_IO entry point.  Only direct reads are
+ * supported here; direct writes are rejected with -EINVAL since they
+ * would bypass the csum and extent bookkeeping done on the buffered
+ * write path.
+ */
+static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
+                       const struct iovec *iov, loff_t offset,
+                       unsigned long nr_segs)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file->f_mapping->host;
+
+       if (rw == WRITE)
+               return -EINVAL;
+
+       /* delegate the actual read to the generic blockdev helper,
+        * using btrfs_get_block to resolve file blocks to devices */
+       return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+                                 offset, nr_segs, btrfs_get_block, NULL);
+}
+
 static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
 {
        return extent_bmap(mapping, iblock, btrfs_get_extent);
@@ -2165,6 +2510,7 @@ static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
        map = &BTRFS_I(page->mapping->host)->extent_tree;
        ret = try_release_extent_mapping(map, tree, page, gfp_flags);
        if (ret == 1) {
+               invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
@@ -2179,6 +2525,12 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        extent_invalidatepage(tree, page, offset);
        btrfs_releasepage(page, GFP_NOFS);
+       if (PagePrivate(page)) {
+               invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
+               ClearPagePrivate(page);
+               set_page_private(page, 0);
+               page_cache_release(page);
+       }
 }
 
 /*
@@ -2921,11 +3273,14 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                drop_inode = 1;
        else {
                inode->i_mapping->a_ops = &btrfs_aops;
+               inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
                inode->i_fop = &btrfs_file_operations;
                inode->i_op = &btrfs_file_inode_operations;
                extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
                extent_io_tree_init(&BTRFS_I(inode)->io_tree,
                                     inode->i_mapping, GFP_NOFS);
+               extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
+                                    inode->i_mapping, GFP_NOFS);
                BTRFS_I(inode)->delalloc_bytes = 0;
                BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
        }
@@ -2960,6 +3315,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 
        inode->i_op = &btrfs_symlink_inode_operations;
        inode->i_mapping->a_ops = &btrfs_symlink_aops;
+       inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
        inode->i_size = name_len - 1;
        err = btrfs_update_inode(trans, root, inode);
        if (err)
@@ -2978,6 +3334,7 @@ out_fail:
        btrfs_throttle(root);
        return err;
 }
+
 static int btrfs_permission(struct inode *inode, int mask,
                            struct nameidata *nd)
 {
@@ -3019,9 +3376,11 @@ static struct file_operations btrfs_dir_file_operations = {
 
 static struct extent_io_ops btrfs_extent_io_ops = {
        .fill_delalloc = run_delalloc_range,
-       .writepage_io_hook = btrfs_writepage_io_hook,
+       .submit_bio_hook = btrfs_submit_bio_hook,
+       .merge_bio_hook = btrfs_merge_bio_hook,
        .readpage_io_hook = btrfs_readpage_io_hook,
        .readpage_end_io_hook = btrfs_readpage_end_io_hook,
+       .readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
        .set_bit_hook = btrfs_set_bit_hook,
        .clear_bit_hook = btrfs_clear_bit_hook,
 };
@@ -3033,6 +3392,7 @@ static struct address_space_operations btrfs_aops = {
        .readpages      = btrfs_readpages,
        .sync_page      = block_sync_page,
        .bmap           = btrfs_bmap,
+       .direct_IO      = btrfs_direct_IO,
        .invalidatepage = btrfs_invalidatepage,
        .releasepage    = btrfs_releasepage,
        .set_page_dirty = __set_page_dirty_nobuffers,
This page took 0.037339 seconds and 5 git commands to generate.