Btrfs: Remove debugging statements from the invalidatepage calls
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 32e778fb014ef3c68ee32e8e754532bf591d8641..5b1514e58de4ade6757cbfbc746c3b266670f94b 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -122,9 +122,12 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end)
        if (alloc_hint == EXTENT_MAP_INLINE)
                goto out;
 
+       BUG_ON(num_bytes > btrfs_super_total_bytes(&root->fs_info->super_copy));
+
        while(num_bytes > 0) {
                cur_alloc_size = min(num_bytes, root->fs_info->max_extent);
                ret = btrfs_alloc_extent(trans, root, cur_alloc_size,
+                                        root->sectorsize,
                                         root->root_key.objectid,
                                         trans->transid,
                                         inode->i_ino, start, 0,
@@ -133,11 +136,17 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end)
                        WARN_ON(1);
                        goto out;
                }
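+               /* the allocator may return less space than we asked for */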
+               cur_alloc_size = ins.offset;
                ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
                                               start, ins.objectid, ins.offset,
                                               ins.offset);
                inode->i_blocks += ins.offset >> 9;
                btrfs_check_file(root, inode);
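+               /* don't let num_bytes wrap if we got back more than we asked for */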
+               if (num_bytes < cur_alloc_size) {
+                       printk("num_bytes %Lu cur_alloc %Lu\n", num_bytes,
+                              cur_alloc_size);
+                       break;
+               }
                num_bytes -= cur_alloc_size;
                alloc_hint = ins.objectid + ins.offset;
                start += cur_alloc_size;
@@ -322,12 +331,37 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
        return 0;
 }
 
-int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
+int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                          int mirror_num)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        int ret = 0;
+       char *sums = NULL;
+
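+       /* checksum the data in this bio before it goes down to the disk */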
+       ret = btrfs_csum_one_bio(root, bio, &sums);
+       BUG_ON(ret);
+
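+       /* record those checksums in the csum tree inside one transaction */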
+       mutex_lock(&root->fs_info->fs_mutex);
+       trans = btrfs_start_transaction(root, 1);
+
+       btrfs_set_trans_block_group(trans, inode);
+       btrfs_csum_file_blocks(trans, root, inode, bio, sums);
+
+       ret = btrfs_end_transaction(trans, root);
+       BUG_ON(ret);
+       mutex_unlock(&root->fs_info->fs_mutex);
+
+       kfree(sums);
+
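+       /* map the bio onto the right device(s) and submit it */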
+       return btrfs_map_bio(root, rw, bio, mirror_num);
+}
+
+int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
+                         int mirror_num)
+{
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       int ret = 0;
 
        if (!(rw & (1 << BIO_RW))) {
                ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
@@ -340,13 +374,9 @@ int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                goto mapit;
        }
 
-       mutex_lock(&root->fs_info->fs_mutex);
-       trans = btrfs_start_transaction(root, 1);
-       btrfs_set_trans_block_group(trans, inode);
-       btrfs_csum_file_blocks(trans, root, inode, bio);
-       ret = btrfs_end_transaction(trans, root);
-       BUG_ON(ret);
-       mutex_unlock(&root->fs_info->fs_mutex);
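+       /* hand the bio to the async helpers; they checksum and submit it */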
+       return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
+                                  inode, rw, bio, mirror_num,
+                                  __btrfs_submit_bio_hook);
 mapit:
        return btrfs_map_bio(root, rw, bio, mirror_num);
 }
@@ -360,9 +390,11 @@ int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end)
        struct btrfs_csum_item *item;
        struct btrfs_path *path = NULL;
        u32 csum;
+
        if (btrfs_test_opt(root, NODATASUM) ||
            btrfs_test_flag(inode, NODATASUM))
                return 0;
+
        mutex_lock(&root->fs_info->fs_mutex);
        path = btrfs_alloc_path();
        item = btrfs_lookup_csum(NULL, root, path, inode->i_ino, start, 0);
@@ -402,6 +434,7 @@ int btrfs_readpage_io_failed_hook(struct bio *failed_bio,
        struct extent_map *em;
        struct inode *inode = page->mapping->host;
        struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
+       struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct bio *bio;
        int num_copies;
        int ret;
@@ -409,7 +442,6 @@ int btrfs_readpage_io_failed_hook(struct bio *failed_bio,
 
        ret = get_state_private(failure_tree, start, &private);
        if (ret) {
-               size_t pg_offset = start - page_offset(page);
                failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
                if (!failrec)
                        return -ENOMEM;
@@ -417,8 +449,13 @@ int btrfs_readpage_io_failed_hook(struct bio *failed_bio,
                failrec->len = end - start + 1;
                failrec->last_mirror = 0;
 
-               em = btrfs_get_extent(inode, NULL, pg_offset, start,
-                                     failrec->len, 0);
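+               /* the mapping for this range should still be cached from the original read */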
+               spin_lock(&em_tree->lock);
+               em = lookup_extent_mapping(em_tree, start, failrec->len);
+               if (em && (em->start > start ||
+                          em->start + em->len < start)) {
+                       free_extent_map(em);
+                       em = NULL;
+               }
+               spin_unlock(&em_tree->lock);
 
                if (!em || IS_ERR(em)) {
                        kfree(failrec);
@@ -430,9 +467,10 @@ int btrfs_readpage_io_failed_hook(struct bio *failed_bio,
                free_extent_map(em);
                set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
                                EXTENT_DIRTY, GFP_NOFS);
-               set_state_private(failure_tree, start, (u64)failrec);
+               set_state_private(failure_tree, start,
+                                (u64)(unsigned long)failrec);
        } else {
-               failrec = (struct io_failure_record *)private;
+               failrec = (struct io_failure_record *)(unsigned long)private;
        }
        num_copies = btrfs_num_copies(
                              &BTRFS_I(inode)->root->fs_info->mapping_tree,
@@ -511,7 +549,8 @@ int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
                                        start, &private_failure);
                if (ret == 0) {
-                       failure = (struct io_failure_record *)private_failure;
+                       failure = (struct io_failure_record *)(unsigned long)
+                                  private_failure;
                        set_state_private(&BTRFS_I(inode)->io_failure_tree,
                                          failure->start, 0);
                        clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
@@ -532,6 +571,8 @@ zeroit:
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_IRQ0);
        local_irq_restore(flags);
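+       /* a private of zero means no checksum was recorded for this range */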
+       if (private == 0)
+               return 0;
        return -EIO;
 }
 
@@ -881,8 +922,9 @@ static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans,
        int pending_del_nr = 0;
        int pending_del_slot = 0;
        int extent_type = -1;
+       u64 mask = root->sectorsize - 1;
 
-       btrfs_drop_extent_cache(inode, inode->i_size, (u64)-1);
+       btrfs_drop_extent_cache(inode, inode->i_size & (~mask), (u64)-1);
        path = btrfs_alloc_path();
        path->reada = -1;
        BUG_ON(!path);
@@ -1185,7 +1227,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
                                                       hole_start, 0, 0,
                                                       hole_size);
                        btrfs_drop_extent_cache(inode, hole_start,
-                                               hole_size - 1);
+                                               (u64)-1);
                        btrfs_check_file(root, inode);
                }
                btrfs_end_transaction(trans, root);
@@ -2056,6 +2098,68 @@ out_unlock:
        return err;
 }
 
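+/*
+ * a new extent map 'em' overlaps 'existing', which is already in the
+ * tree.  Extend 'em' to cover both ranges and replace 'existing' with
+ * it, but only when both maps point at the same on-disk blocks.
+ * Anything else means the overlap is bogus and we return -EIO.
+ */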
+static int merge_extent_mapping(struct extent_map_tree *em_tree,
+                               struct extent_map *existing,
+                               struct extent_map *em)
+{
+       u64 start_diff;
+       u64 new_end;
+       int ret = 0;
+       int real_blocks = existing->block_start < EXTENT_MAP_LAST_BYTE;
+
+       if (real_blocks && em->block_start >= EXTENT_MAP_LAST_BYTE)
+               goto invalid;
+
+       if (!real_blocks && em->block_start != existing->block_start)
+               goto invalid;
+
+       new_end = max(existing->start + existing->len, em->start + em->len);
+
+       if (existing->start >= em->start) {
+               if (em->start + em->len < existing->start)
+                       goto invalid;
+
+               start_diff = existing->start - em->start;
+               if (real_blocks && em->block_start + start_diff !=
+                   existing->block_start)
+                       goto invalid;
+
+               em->len = new_end - em->start;
+
+               remove_extent_mapping(em_tree, existing);
+               /* free for the tree */
+               free_extent_map(existing);
+               ret = add_extent_mapping(em_tree, em);
+
+       } else if (em->start > existing->start) {
+
+               if (existing->start + existing->len < em->start)
+                       goto invalid;
+
+               start_diff = em->start - existing->start;
+               if (real_blocks && existing->block_start + start_diff !=
+                   em->block_start)
+                       goto invalid;
+
+               remove_extent_mapping(em_tree, existing);
+               em->block_start = existing->block_start;
+               em->start = existing->start;
+               em->len = new_end - existing->start;
+               free_extent_map(existing);
+
+               ret = add_extent_mapping(em_tree, em);
+       } else {
+               goto invalid;
+       }
+       return ret;
+
+invalid:
+       printk("invalid extent map merge [%Lu %Lu %Lu] [%Lu %Lu %Lu]\n",
+              existing->start, existing->len, existing->block_start,
+              em->start, em->len, em->block_start);
+       return -EIO;
+}
+
 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
                                    size_t pg_offset, u64 start, u64 len,
                                    int create)
@@ -2240,12 +2344,35 @@ insert:
        err = 0;
        spin_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em);
+
+       /* it is possible that someone inserted the extent into the tree
+        * while we had the lock dropped.  It is also possible that
+        * an overlapping map exists in the tree
+        */
        if (ret == -EEXIST) {
-               free_extent_map(em);
-               em = lookup_extent_mapping(em_tree, start, len);
-               if (!em) {
-                       err = -EIO;
-                       printk("failing to insert %Lu %Lu\n", start, len);
+               struct extent_map *existing;
+               existing = lookup_extent_mapping(em_tree, start, len);
+               if (!existing) {
+                       existing = lookup_extent_mapping(em_tree, em->start,
+                                                        em->len);
+                       if (existing) {
+                               err = merge_extent_mapping(em_tree, existing,
+                                                          em);
+                               free_extent_map(existing);
+                               if (err) {
+                                       free_extent_map(em);
+                                       em = NULL;
+                               }
+                       } else {
+                               err = -EIO;
+                               printk("failing to insert %Lu %Lu\n",
+                                      start, len);
+                               free_extent_map(em);
+                               em = NULL;
+                       }
+               } else {
+                       free_extent_map(em);
+                       em = existing;
                }
        }
        spin_unlock(&em_tree->lock);
@@ -2383,6 +2510,7 @@ static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
        map = &BTRFS_I(page->mapping->host)->extent_tree;
        ret = try_release_extent_mapping(map, tree, page, gfp_flags);
        if (ret == 1) {
+               invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
@@ -2397,6 +2525,12 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        extent_invalidatepage(tree, page, offset);
        btrfs_releasepage(page, GFP_NOFS);
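+       /* releasepage can refuse; an invalidate must drop the private state anyway */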
+       if (PagePrivate(page)) {
+               invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
+               ClearPagePrivate(page);
+               set_page_private(page, 0);
+               page_cache_release(page);
+       }
 }
 
 /*