sizeof(struct btrfs_item) * 2) / \
BTRFS_CRC32_SIZE) - 1))
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- u64 objectid, u64 pos,
- u64 offset, u64 disk_num_bytes,
- u64 num_bytes)
+ struct btrfs_root *root,
+ u64 objectid, u64 pos,
+ u64 disk_offset, u64 disk_num_bytes,
+ u64 num_bytes, u64 offset)
{
int ret = 0;
struct btrfs_file_extent_item *item;
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
- btrfs_set_file_extent_disk_bytenr(leaf, item, offset);
+ btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
- btrfs_set_file_extent_offset(leaf, item, 0);
+ btrfs_set_file_extent_offset(leaf, item, offset);
btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
btrfs_set_file_extent_generation(leaf, item, trans->transid);
btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
return ret;
}
+/*
+ * btrfs_csum_one_bio - checksum every page segment of a bio up front.
+ *
+ * Allocates one BTRFS_CRC32_SIZE slot per bio_vec and fills each slot
+ * with the finalized crc32 of that segment's data.  The buffer is
+ * returned through @sums_ret; ownership passes to the caller, who must
+ * kfree() it.  Returns 0 on success, -ENOMEM if the sum buffer cannot
+ * be allocated.
+ *
+ * NOTE(review): uses kmap_atomic(..., KM_USER0), so this is presumably
+ * called from a context where sleeping between map/unmap is forbidden
+ * — confirm against callers.
+ */
+int btrfs_csum_one_bio(struct btrfs_root *root,
+                       struct bio *bio, char **sums_ret)
+{
+	u32 *sums;
+	char *data;
+	struct bio_vec *bvec = bio->bi_io_vec;
+	int bio_index = 0;
+
+	/* one 4-byte crc per bio_vec; freed by the caller via *sums_ret */
+	sums = kmalloc(bio->bi_vcnt * BTRFS_CRC32_SIZE, GFP_NOFS);
+	if (!sums)
+		return -ENOMEM;
+	*sums_ret = (char *)sums;
+
+	while(bio_index < bio->bi_vcnt) {
+		/* map just long enough to run the crc over this segment */
+		data = kmap_atomic(bvec->bv_page, KM_USER0);
+		/* crc32 is seeded with all-ones before folding in data */
+		*sums = ~(u32)0;
+		*sums = btrfs_csum_data(root, data + bvec->bv_offset,
+				       *sums, bvec->bv_len);
+		kunmap_atomic(data, KM_USER0);
+		/* btrfs_csum_final writes the on-disk byte order in place */
+		btrfs_csum_final(*sums, (char *)sums);
+		sums++;
+		bio_index++;
+		bvec++;
+	}
+	return 0;
+}
+
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode,
- struct bio *bio)
+ struct bio *bio, char *sums)
{
u64 objectid = inode->i_ino;
u64 offset;
struct btrfs_csum_item *item_end;
struct extent_buffer *leaf = NULL;
u64 csum_offset;
- u32 csum_result;
+ u32 *sums32 = (u32 *)sums;
u32 nritems;
u32 ins_size;
int bio_index = 0;
struct bio_vec *bvec = bio->bi_io_vec;
- char *data;
+ char *eb_map;
+ char *eb_token;
+ unsigned long map_len;
+ unsigned long map_start;
path = btrfs_alloc_path();
BUG_ON(!path);
item_end = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
item_end = (struct btrfs_csum_item *)((unsigned char *)item_end +
btrfs_item_size_nr(leaf, path->slots[0]));
+ eb_token = NULL;
next_bvec:
- data = kmap_atomic(bvec->bv_page, KM_IRQ0);
- csum_result = ~(u32)0;
- csum_result = btrfs_csum_data(root, data + bvec->bv_offset,
- csum_result, bvec->bv_len);
- kunmap_atomic(data, KM_IRQ0);
- btrfs_csum_final(csum_result, (char *)&csum_result);
- if (csum_result == 0) {
- printk("csum result is 0 for inode %lu offset %Lu\n", inode->i_ino, offset);
- }
- write_extent_buffer(leaf, &csum_result, (unsigned long)item,
- BTRFS_CRC32_SIZE);
+ if (!eb_token ||
+ (unsigned long)item + BTRFS_CRC32_SIZE >= map_start + map_len) {
+ int err;
+
+ if (eb_token)
+ unmap_extent_buffer(leaf, eb_token, KM_USER1);
+ eb_token = NULL;
+ err = map_private_extent_buffer(leaf, (unsigned long)item,
+ BTRFS_CRC32_SIZE,
+ &eb_token, &eb_map,
+ &map_start, &map_len, KM_USER1);
+ if (err)
+ eb_token = NULL;
+ }
+ if (eb_token) {
+ memcpy(eb_token + ((unsigned long)item & (PAGE_CACHE_SIZE - 1)),
+ sums32, BTRFS_CRC32_SIZE);
+ } else {
+ write_extent_buffer(leaf, sums32, (unsigned long)item,
+ BTRFS_CRC32_SIZE);
+ }
bio_index++;
bvec++;
+ sums32++;
if (bio_index < bio->bi_vcnt) {
- item = (struct btrfs_csum_item *)((char *)item + BTRFS_CRC32_SIZE);
- if (item < item_end)
+ item = (struct btrfs_csum_item *)((char *)item +
+ BTRFS_CRC32_SIZE);
+ if (item < item_end && offset + PAGE_CACHE_SIZE ==
+ page_offset(bvec->bv_page)) {
+ offset = page_offset(bvec->bv_page);
goto next_bvec;
+ }
+ }
+ if (eb_token) {
+ unmap_extent_buffer(leaf, eb_token, KM_USER1);
+ eb_token = NULL;
}
btrfs_mark_buffer_dirty(path->nodes[0]);
if (bio_index < bio->bi_vcnt) {