btrfs: Add handler for invalidate page
[deliverable/linux.git] / fs/btrfs/extent_io.c
index f1018cfbfefad0f7b43920bf32ece43b46228276..33a01ea414651d217349940ca6c0e2ed7bea6f4c 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -96,8 +96,8 @@ static inline void __btrfs_debug_check_extent_io_range(const char *caller,
        inode = tree->mapping->host;
        isize = i_size_read(inode);
        if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
-               printk_ratelimited(KERN_DEBUG
-                   "BTRFS: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
+               btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
+                   "%s: ino %llu isize %llu odd range [%llu,%llu]",
                                caller, btrfs_ino(inode), isize, start, end);
        }
 }
@@ -131,6 +131,25 @@ struct extent_page_data {
        unsigned int sync_io:1;
 };
 
+static void add_extent_changeset(struct extent_state *state, unsigned bits,
+                                struct extent_changeset *changeset,
+                                int set)
+{
+       int ret;
+
+       if (!changeset)
+               return;
+       if (set && (state->state & bits) == bits)
+               return;
+       if (!set && (state->state & bits) == 0)
+               return;
+       changeset->bytes_changed += state->end - state->start + 1;
+       ret = ulist_add(changeset->range_changed, state->start, state->end,
+                       GFP_ATOMIC);
+       /* ENOMEM */
+       BUG_ON(ret < 0);
+}
+
 static noinline void flush_write_bio(void *data);
 static inline struct btrfs_fs_info *
 tree_fs_info(struct extent_io_tree *tree)
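
For reference, the struct extent_changeset type consumed by the new helper above is not visible in this diff; it is introduced in extent_io.h by the same series. A minimal sketch of what add_extent_changeset() fills in, assuming the definition from that header (field names per the btrfs qgroup patchset):

	struct extent_changeset {
		/* How many bytes are set/cleared in this operation */
		u64 bytes_changed;

		/* Changed ranges, recorded as [state->start, state->end] pairs */
		struct ulist *range_changed;
	};

Note the two early returns in the helper: a set request whose bits are already present, or a clear request whose bits are already absent, records nothing, so the changeset reflects only real state transitions.
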
@@ -410,7 +429,8 @@ static void clear_state_cb(struct extent_io_tree *tree,
 }
 
 static void set_state_bits(struct extent_io_tree *tree,
-                          struct extent_state *state, unsigned *bits);
+                          struct extent_state *state, unsigned *bits,
+                          struct extent_changeset *changeset);
 
 /*
  * insert an extent_state struct into the tree.  'bits' are set on the
@@ -426,7 +446,7 @@ static int insert_state(struct extent_io_tree *tree,
                        struct extent_state *state, u64 start, u64 end,
                        struct rb_node ***p,
                        struct rb_node **parent,
-                       unsigned *bits)
+                       unsigned *bits, struct extent_changeset *changeset)
 {
        struct rb_node *node;
 
@@ -436,7 +456,7 @@ static int insert_state(struct extent_io_tree *tree,
        state->start = start;
        state->end = end;
 
-       set_state_bits(tree, state, bits);
+       set_state_bits(tree, state, bits, changeset);
 
        node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
        if (node) {
@@ -511,7 +531,8 @@ static struct extent_state *next_state(struct extent_state *state)
  */
 static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
                                            struct extent_state *state,
-                                           unsigned *bits, int wake)
+                                           unsigned *bits, int wake,
+                                           struct extent_changeset *changeset)
 {
        struct extent_state *next;
        unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;
@@ -522,6 +543,7 @@ static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
                tree->dirty_bytes -= range;
        }
        clear_state_cb(tree, state, bits);
+       add_extent_changeset(state, bits_to_clear, changeset, 0);
        state->state &= ~bits_to_clear;
        if (wake)
                wake_up(&state->wq);
@@ -569,10 +591,10 @@ static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
  *
  * This takes the tree lock, and returns 0 on success and < 0 on error.
  */
-int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                    unsigned bits, int wake, int delete,
-                    struct extent_state **cached_state,
-                    gfp_t mask)
+static int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+                             unsigned bits, int wake, int delete,
+                             struct extent_state **cached_state,
+                             gfp_t mask, struct extent_changeset *changeset)
 {
        struct extent_state *state;
        struct extent_state *cached;
@@ -671,7 +693,8 @@ hit_next:
                if (err)
                        goto out;
                if (state->end <= end) {
-                       state = clear_state_bit(tree, state, &bits, wake);
+                       state = clear_state_bit(tree, state, &bits, wake,
+                                               changeset);
                        goto next;
                }
                goto search_again;
@@ -692,13 +715,13 @@ hit_next:
                if (wake)
                        wake_up(&state->wq);
 
-               clear_state_bit(tree, prealloc, &bits, wake);
+               clear_state_bit(tree, prealloc, &bits, wake, changeset);
 
                prealloc = NULL;
                goto out;
        }
 
-       state = clear_state_bit(tree, state, &bits, wake);
+       state = clear_state_bit(tree, state, &bits, wake, changeset);
 next:
        if (last_end == (u64)-1)
                goto out;
@@ -789,7 +812,7 @@ out:
 
 static void set_state_bits(struct extent_io_tree *tree,
                           struct extent_state *state,
-                          unsigned *bits)
+                          unsigned *bits, struct extent_changeset *changeset)
 {
        unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
 
@@ -798,6 +821,7 @@ static void set_state_bits(struct extent_io_tree *tree,
                u64 range = state->end - state->start + 1;
                tree->dirty_bytes += range;
        }
+       add_extent_changeset(state, bits_to_set, changeset, 1);
        state->state |= bits_to_set;
 }
 
@@ -835,7 +859,7 @@ static int __must_check
 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                 unsigned bits, unsigned exclusive_bits,
                 u64 *failed_start, struct extent_state **cached_state,
-                gfp_t mask)
+                gfp_t mask, struct extent_changeset *changeset)
 {
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
@@ -873,7 +897,7 @@ again:
                prealloc = alloc_extent_state_atomic(prealloc);
                BUG_ON(!prealloc);
                err = insert_state(tree, prealloc, start, end,
-                                  &p, &parent, &bits);
+                                  &p, &parent, &bits, changeset);
                if (err)
                        extent_io_tree_panic(tree, err);
 
@@ -899,7 +923,7 @@ hit_next:
                        goto out;
                }
 
-               set_state_bits(tree, state, &bits);
+               set_state_bits(tree, state, &bits, changeset);
                cache_state(state, cached_state);
                merge_state(tree, state);
                if (last_end == (u64)-1)
@@ -945,7 +969,7 @@ hit_next:
                if (err)
                        goto out;
                if (state->end <= end) {
-                       set_state_bits(tree, state, &bits);
+                       set_state_bits(tree, state, &bits, changeset);
                        cache_state(state, cached_state);
                        merge_state(tree, state);
                        if (last_end == (u64)-1)
@@ -980,7 +1004,7 @@ hit_next:
                 * the later extent.
                 */
                err = insert_state(tree, prealloc, start, this_end,
-                                  NULL, NULL, &bits);
+                                  NULL, NULL, &bits, changeset);
                if (err)
                        extent_io_tree_panic(tree, err);
 
@@ -1008,7 +1032,7 @@ hit_next:
                if (err)
                        extent_io_tree_panic(tree, err);
 
-               set_state_bits(tree, prealloc, &bits);
+               set_state_bits(tree, prealloc, &bits, changeset);
                cache_state(prealloc, cached_state);
                merge_state(tree, prealloc);
                prealloc = NULL;
@@ -1038,7 +1062,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                   struct extent_state **cached_state, gfp_t mask)
 {
        return __set_extent_bit(tree, start, end, bits, 0, failed_start,
-                               cached_state, mask);
+                               cached_state, mask, NULL);
 }
 
 
@@ -1111,7 +1135,7 @@ again:
                        goto out;
                }
                err = insert_state(tree, prealloc, start, end,
-                                  &p, &parent, &bits);
+                                  &p, &parent, &bits, NULL);
                if (err)
                        extent_io_tree_panic(tree, err);
                cache_state(prealloc, cached_state);
@@ -1130,9 +1154,9 @@ hit_next:
         * Just lock what we found and keep going
         */
        if (state->start == start && state->end <= end) {
-               set_state_bits(tree, state, &bits);
+               set_state_bits(tree, state, &bits, NULL);
                cache_state(state, cached_state);
-               state = clear_state_bit(tree, state, &clear_bits, 0);
+               state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
                if (last_end == (u64)-1)
                        goto out;
                start = last_end + 1;
@@ -1171,9 +1195,10 @@ hit_next:
                if (err)
                        goto out;
                if (state->end <= end) {
-                       set_state_bits(tree, state, &bits);
+                       set_state_bits(tree, state, &bits, NULL);
                        cache_state(state, cached_state);
-                       state = clear_state_bit(tree, state, &clear_bits, 0);
+                       state = clear_state_bit(tree, state, &clear_bits, 0,
+                                               NULL);
                        if (last_end == (u64)-1)
                                goto out;
                        start = last_end + 1;
@@ -1208,7 +1233,7 @@ hit_next:
                 * the later extent.
                 */
                err = insert_state(tree, prealloc, start, this_end,
-                                  NULL, NULL, &bits);
+                                  NULL, NULL, &bits, NULL);
                if (err)
                        extent_io_tree_panic(tree, err);
                cache_state(prealloc, cached_state);
@@ -1233,9 +1258,9 @@ hit_next:
                if (err)
                        extent_io_tree_panic(tree, err);
 
-               set_state_bits(tree, prealloc, &bits);
+               set_state_bits(tree, prealloc, &bits, NULL);
                cache_state(prealloc, cached_state);
-               clear_state_bit(tree, prealloc, &clear_bits, 0);
+               clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
                prealloc = NULL;
                goto out;
        }
@@ -1274,6 +1299,30 @@ int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                              NULL, mask);
 }
 
+int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+                          unsigned bits, gfp_t mask,
+                          struct extent_changeset *changeset)
+{
+       /*
+        * We don't support EXTENT_LOCKED yet, as the current changeset
+        * records any bits that change; in the EXTENT_LOCKED case the
+        * call would either fail with -EEXIST or the changeset would
+        * record the whole range.
+        */
+       BUG_ON(bits & EXTENT_LOCKED);
+
+       return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, mask,
+                               changeset);
+}
+
+int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+                    unsigned bits, int wake, int delete,
+                    struct extent_state **cached, gfp_t mask)
+{
+       return __clear_extent_bit(tree, start, end, bits, wake, delete,
+                                 cached, mask, NULL);
+}
+
 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                      unsigned bits, gfp_t mask)
 {
@@ -1285,6 +1334,20 @@ int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
        return clear_extent_bit(tree, start, end, bits, wake, 0, NULL, mask);
 }
 
+int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+                            unsigned bits, gfp_t mask,
+                            struct extent_changeset *changeset)
+{
+       /*
+        * We don't support the EXTENT_LOCKED case, for the same reason
+        * as in set_record_extent_bits().
+        */
+       BUG_ON(bits & EXTENT_LOCKED);
+
+       return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask,
+                                 changeset);
+}
+
 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
                        struct extent_state **cached_state, gfp_t mask)
 {
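
Taken together, set_record_extent_bits() and clear_record_extent_bits() let a caller find out exactly which sub-ranges actually changed. A hypothetical caller, loosely modeled on the qgroup data-reservation path this API was written for (the function name, bit choice and accounting are illustrative, not part of this patch):

	static int reserve_range_sketch(struct inode *inode, u64 start, u64 len)
	{
		struct extent_changeset changeset;
		struct ulist_iterator uiter;
		struct ulist_node *unode;
		int ret;

		changeset.bytes_changed = 0;
		changeset.range_changed = ulist_alloc(GFP_NOFS);
		if (!changeset.range_changed)
			return -ENOMEM;

		ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
					     start + len - 1,
					     EXTENT_QGROUP_RESERVED, GFP_NOFS,
					     &changeset);
		if (ret >= 0) {
			/* Walk only the ranges whose bits actually flipped;
			 * add_extent_changeset() stored the range start in
			 * val and the range end in aux. */
			ULIST_ITER_INIT(&uiter);
			while ((unode = ulist_next(changeset.range_changed,
						   &uiter))) {
				u64 range_len = unode->aux - unode->val + 1;
				/* account range_len bytes here */
			}
		}
		ulist_free(changeset.range_changed);
		return ret;
	}
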
@@ -1343,7 +1406,7 @@ int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
        while (1) {
                err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
                                       EXTENT_LOCKED, &failed_start,
-                                      cached_state, GFP_NOFS);
+                                      cached_state, GFP_NOFS, NULL);
                if (err == -EEXIST) {
                        wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
                        start = failed_start;
@@ -1365,7 +1428,7 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
        u64 failed_start;
 
        err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
-                              &failed_start, NULL, GFP_NOFS);
+                              &failed_start, NULL, GFP_NOFS, NULL);
        if (err == -EEXIST) {
                if (failed_start > start)
                        clear_extent_bit(tree, start, failed_start - 1,
@@ -2078,8 +2141,8 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
                return -EIO;
        }
 
-       printk_ratelimited_in_rcu(KERN_INFO
-                                 "BTRFS: read error corrected: ino %llu off %llu (dev %s sector %llu)\n",
+       btrfs_info_rl_in_rcu(fs_info,
+               "read error corrected: ino %llu off %llu (dev %s sector %llu)",
                                  btrfs_ino(inode), start,
                                  rcu_str_deref(dev->name), sector);
        bio_put(bio);
@@ -2798,7 +2861,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
                              bio_end_io_t end_io_func,
                              int mirror_num,
                              unsigned long prev_bio_flags,
-                             unsigned long bio_flags)
+                             unsigned long bio_flags,
+                             bool force_bio_submit)
 {
        int ret = 0;
        struct bio *bio;
@@ -2814,6 +2878,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
                        contig = bio_end_sector(bio) == sector;
 
                if (prev_bio_flags != bio_flags || !contig ||
+                   force_bio_submit ||
                    merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
                    bio_add_page(bio, page, page_size, offset) < page_size) {
                        ret = submit_one_bio(rw, bio, mirror_num,
@@ -2910,7 +2975,8 @@ static int __do_readpage(struct extent_io_tree *tree,
                         get_extent_t *get_extent,
                         struct extent_map **em_cached,
                         struct bio **bio, int mirror_num,
-                        unsigned long *bio_flags, int rw)
+                        unsigned long *bio_flags, int rw,
+                        u64 *prev_em_start)
 {
        struct inode *inode = page->mapping->host;
        u64 start = page_offset(page);
@@ -2958,6 +3024,7 @@ static int __do_readpage(struct extent_io_tree *tree,
        }
        while (cur <= end) {
                unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
+               bool force_bio_submit = false;
 
                if (cur >= last_byte) {
                        char *userpage;
@@ -3008,6 +3075,49 @@ static int __do_readpage(struct extent_io_tree *tree,
                block_start = em->block_start;
                if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
                        block_start = EXTENT_MAP_HOLE;
+
+               /*
+                * If we have a file range that points to a compressed extent
+                * and it's followed by a consecutive file range that points to
+                * the same compressed extent (possibly with a different
+                * offset and/or length, so it either points to the whole extent
+                * or only part of it), we must make sure we do not submit a
+                * single bio to populate the pages for the 2 ranges because
+                * this makes the compressed extent read zero out the pages
+                * belonging to the 2nd range. Imagine the following scenario:
+                *
+                *  File layout
+                *  [0 - 8K]                     [8K - 24K]
+                *    |                               |
+                *    |                               |
+                * points to extent X,         points to extent X,
+                * offset 4K, length of 8K     offset 0, length 16K
+                *
+                * [extent X, compressed length = 4K uncompressed length = 16K]
+                *
+                * If the bio to read the compressed extent covers both ranges,
+                * it will decompress extent X into the pages belonging to the
+                * first range and then it will stop, zeroing out the remaining
+                * pages that belong to the other range that points to extent X.
+                * So here we make sure we submit 2 bios, one for the first
+                * range and another one for the second range. Both will target
+                * the same physical extent from disk, but we can't currently
+                * make the compressed bio endio callback populate the pages
+                * for both ranges because each compressed bio is tightly
+                * coupled with a single extent map, and each range can have
+                * an extent map with a different offset value relative to the
+                * uncompressed data of our extent and different lengths. This
+                * is a corner case, so we prioritize correctness over optimal
+                * behavior (accepting the cost of 2 bios for the same extent).
+                */
+               if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
+                   prev_em_start && *prev_em_start != (u64)-1 &&
+                   *prev_em_start != em->orig_start)
+                       force_bio_submit = true;
+
+               if (prev_em_start)
+                       *prev_em_start = em->orig_start;
+
                free_extent_map(em);
                em = NULL;
 
@@ -3023,8 +3133,12 @@ static int __do_readpage(struct extent_io_tree *tree,
 
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
                                            &cached, GFP_NOFS);
-                       unlock_extent_cached(tree, cur, cur + iosize - 1,
-                                            &cached, GFP_NOFS);
+                       if (parent_locked)
+                               free_extent_state(cached);
+                       else
+                               unlock_extent_cached(tree, cur,
+                                                    cur + iosize - 1,
+                                                    &cached, GFP_NOFS);
                        cur = cur + iosize;
                        pg_offset += iosize;
                        continue;
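
The shared-compressed-extent layout described in the comment above is easy to hit once compression and reflinks are combined. A hedged userspace sketch that builds such a layout (paths, offsets and sizes are illustrative; on kernels of this era the btrfs-specific BTRFS_IOC_CLONE_RANGE ioctl plays the role of the later generic FICLONERANGE shown here):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/fs.h>	/* FICLONERANGE, struct file_clone_range */

	int main(void)
	{
		/* Assumes a btrfs filesystem mounted at /mnt with -o compress. */
		int fd = open("/mnt/foobar", O_RDWR | O_CREAT, 0644);
		char buf[16 * 1024];
		struct file_clone_range args;

		/* One 16K buffered write -> one compressed extent X with an
		 * uncompressed length of 16K. */
		memset(buf, 0xaa, sizeof(buf));
		pwrite(fd, buf, sizeof(buf), 0);
		fsync(fd);

		/* Clone 12K starting at offset 4K of extent X to offset 16K.
		 * Now [0,16K) points to X at offset 0 and [16K,28K) points to
		 * the same X at offset 4K: two consecutive file ranges, one
		 * compressed extent, different offsets. */
		args.src_fd = fd;
		args.src_offset = 4 * 1024;
		args.src_length = 12 * 1024;
		args.dest_offset = 16 * 1024;
		if (ioctl(fd, FICLONERANGE, &args))
			perror("FICLONERANGE");

		close(fd);
		return 0;
	}

A readahead that covers both ranges after the page cache is dropped would previously be merged into a single bio and zero out the second range's pages; with force_bio_submit the two ranges get separate bios.
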
@@ -3057,7 +3171,8 @@ static int __do_readpage(struct extent_io_tree *tree,
                                         bdev, bio, pnr,
                                         end_bio_extent_readpage, mirror_num,
                                         *bio_flags,
-                                        this_bio_flag);
+                                        this_bio_flag,
+                                        force_bio_submit);
                if (!ret) {
                        nr++;
                        *bio_flags = this_bio_flag;
@@ -3084,7 +3199,8 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
                                             get_extent_t *get_extent,
                                             struct extent_map **em_cached,
                                             struct bio **bio, int mirror_num,
-                                            unsigned long *bio_flags, int rw)
+                                            unsigned long *bio_flags, int rw,
+                                            u64 *prev_em_start)
 {
        struct inode *inode;
        struct btrfs_ordered_extent *ordered;
@@ -3104,7 +3220,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 
        for (index = 0; index < nr_pages; index++) {
                __do_readpage(tree, pages[index], get_extent, em_cached, bio,
-                             mirror_num, bio_flags, rw);
+                             mirror_num, bio_flags, rw, prev_em_start);
                page_cache_release(pages[index]);
        }
 }
@@ -3114,7 +3230,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
                               int nr_pages, get_extent_t *get_extent,
                               struct extent_map **em_cached,
                               struct bio **bio, int mirror_num,
-                              unsigned long *bio_flags, int rw)
+                              unsigned long *bio_flags, int rw,
+                              u64 *prev_em_start)
 {
        u64 start = 0;
        u64 end = 0;
@@ -3135,7 +3252,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
                                                  index - first_index, start,
                                                  end, get_extent, em_cached,
                                                  bio, mirror_num, bio_flags,
-                                                 rw);
+                                                 rw, prev_em_start);
                        start = page_start;
                        end = start + PAGE_CACHE_SIZE - 1;
                        first_index = index;
@@ -3146,7 +3263,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
                __do_contiguous_readpages(tree, &pages[first_index],
                                          index - first_index, start,
                                          end, get_extent, em_cached, bio,
-                                         mirror_num, bio_flags, rw);
+                                         mirror_num, bio_flags, rw,
+                                         prev_em_start);
 }
 
 static int __extent_read_full_page(struct extent_io_tree *tree,
@@ -3172,7 +3290,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
        }
 
        ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
-                           bio_flags, rw);
+                           bio_flags, rw, NULL);
        return ret;
 }
 
@@ -3198,7 +3316,7 @@ int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
        int ret;
 
        ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
-                                     &bio_flags, READ);
+                           &bio_flags, READ, NULL);
        if (bio)
                ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
        return ret;
@@ -3451,7 +3569,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
                                                 sector, iosize, pg_offset,
                                                 bdev, &epd->bio, max_nr,
                                                 end_bio_extent_writepage,
-                                                0, 0, 0);
+                                                0, 0, 0, false);
                        if (ret)
                                SetPageError(page);
                }
@@ -3754,7 +3872,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
                ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
                                         PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
                                         -1, end_bio_extent_buffer_writepage,
-                                        0, epd->bio_flags, bio_flags);
+                                        0, epd->bio_flags, bio_flags, false);
                epd->bio_flags = bio_flags;
                if (ret) {
                        set_btree_ioerr(p);
@@ -4158,6 +4276,7 @@ int extent_readpages(struct extent_io_tree *tree,
        struct page *page;
        struct extent_map *em_cached = NULL;
        int nr = 0;
+       u64 prev_em_start = (u64)-1;
 
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                page = list_entry(pages->prev, struct page, lru);
@@ -4174,12 +4293,12 @@ int extent_readpages(struct extent_io_tree *tree,
                if (nr < ARRAY_SIZE(pagepool))
                        continue;
                __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
-                                  &bio, 0, &bio_flags, READ);
+                                  &bio, 0, &bio_flags, READ, &prev_em_start);
                nr = 0;
        }
        if (nr)
                __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
-                                  &bio, 0, &bio_flags, READ);
+                                  &bio, 0, &bio_flags, READ, &prev_em_start);
 
        if (em_cached)
                free_extent_map(em_cached);
@@ -5514,13 +5633,15 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
        unsigned long src_i;
 
        if (src_offset + len > dst->len) {
-               printk(KERN_ERR "BTRFS: memmove bogus src_offset %lu move "
-                      "len %lu dst len %lu\n", src_offset, len, dst->len);
+               btrfs_err(dst->fs_info,
+                       "memcpy bogus src_offset %lu move "
+                       "len %lu dst len %lu", src_offset, len, dst->len);
                BUG_ON(1);
        }
        if (dst_offset + len > dst->len) {
-               printk(KERN_ERR "BTRFS: memmove bogus dst_offset %lu move "
-                      "len %lu dst len %lu\n", dst_offset, len, dst->len);
+               btrfs_err(dst->fs_info,
+                       "memcpy bogus dst_offset %lu move "
+                       "len %lu dst len %lu", dst_offset, len, dst->len);
                BUG_ON(1);
        }
 
@@ -5560,13 +5681,13 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
        unsigned long src_i;
 
        if (src_offset + len > dst->len) {
-               printk(KERN_ERR "BTRFS: memmove bogus src_offset %lu move "
-                      "len %lu len %lu\n", src_offset, len, dst->len);
+               btrfs_err(dst->fs_info, "memmove bogus src_offset %lu move "
+                      "len %lu dst len %lu", src_offset, len, dst->len);
                BUG_ON(1);
        }
        if (dst_offset + len > dst->len) {
-               printk(KERN_ERR "BTRFS: memmove bogus dst_offset %lu move "
-                      "len %lu len %lu\n", dst_offset, len, dst->len);
+               btrfs_err(dst->fs_info, "memmove bogus dst_offset %lu move "
+                      "len %lu dst len %lu", dst_offset, len, dst->len);
                BUG_ON(1);
        }
        if (dst_offset < src_offset) {