/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
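
/*
 * copy the user data for a write into the pages prepared by
 * prepare_pages(), one page at a time
 */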
static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
				struct page **prepared_pages,
				const char __user *buf)
{
	long page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];

		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		kmap(page);
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		kunmap(page);

		buf += count;
		write_bytes -= count;
		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}
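
/*
 * unlock the pages from a write and drop the page cache references
 * taken when they were grabbed
 */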
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		/* pages past a failed grab_cache_page() are left NULL */
		if (!pages[i])
			break;
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}
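
/*
 * walk the pages of a write and push them into the btree: small writes
 * into unallocated space become inline extents, everything else has its
 * data csummed, then each page is sent through btrfs_commit_write()
 * under its own short transaction
 */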
static int dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int i;
	int ret = 0;
	size_t this_write;
	size_t offset;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct buffer_head *bh;
	struct btrfs_file_extent_item *ei;

	for (i = 0; i < num_pages; i++) {
		offset = pos & (PAGE_CACHE_SIZE - 1);
		this_write = min((size_t)PAGE_CACHE_SIZE - offset, write_bytes);
		/* FIXME, one block at a time */

		mutex_lock(&root->fs_info->fs_mutex);
		trans = btrfs_start_transaction(root, 1);
		btrfs_set_trans_block_group(trans, inode);

		bh = page_buffers(pages[i]);

		if (buffer_mapped(bh) && bh->b_blocknr == 0) {
			struct btrfs_key key;
			struct btrfs_path *path;
			char *ptr;
			u32 datasize;

			/* create an inline extent, and copy the data in */
			path = btrfs_alloc_path();
			BUG_ON(!path);
			key.objectid = inode->i_ino;
			key.offset = pages[i]->index << PAGE_CACHE_SHIFT;
			key.flags = 0;
			btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
			BUG_ON(write_bytes >= PAGE_CACHE_SIZE);
			datasize = offset +
				btrfs_file_extent_calc_inline_size(write_bytes);

			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      datasize);
			BUG_ON(ret);
			ei = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
					    path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(ei, trans->transid);
			btrfs_set_file_extent_type(ei,
						   BTRFS_FILE_EXTENT_INLINE);
			ptr = btrfs_file_extent_inline_start(ei);
			btrfs_memcpy(root, path->nodes[0]->b_data,
				     ptr, bh->b_data, offset + write_bytes);
			mark_buffer_dirty(path->nodes[0]);
			btrfs_free_path(path);
		} else if (buffer_mapped(bh)) {
			/* csum the file data */
			btrfs_csum_file_block(trans, root, inode->i_ino,
					      pages[i]->index << PAGE_CACHE_SHIFT,
					      kmap(pages[i]), PAGE_CACHE_SIZE);
			kunmap(pages[i]);
		}
		SetPageChecked(pages[i]);
		ret = btrfs_end_transaction(trans, root);
		BUG_ON(ret);
		mutex_unlock(&root->fs_info->fs_mutex);

		ret = btrfs_commit_write(file, pages[i], offset,
					 offset + this_write);
		pos += this_write;
		WARN_ON(this_write > write_bytes);
		write_bytes -= this_write;
	}
	return ret;
}
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 *hint_block)
{
	int ret;
	int slot;
	int keep;
	int bookend;
	int found_type;
	int found_extent;
	int found_inline;
	u64 extent_end = 0;
	struct btrfs_key key;
	struct btrfs_leaf *leaf;
	struct btrfs_file_extent_item *extent;
	struct btrfs_file_extent_item old;
	struct btrfs_path *path;
	u64 search_start = start;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		keep = 0;
		bookend = 0;
		found_extent = 0;
		found_inline = 0;

		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}
		leaf = btrfs_buffer_leaf(path->nodes[0]);
		slot = path->slots[0];
		btrfs_disk_key_to_cpu(&key, &leaf->items[slot].key);

		if (key.offset >= end || key.objectid != inode->i_ino) {
			ret = 0;
			goto out;
		}
		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) {
			ret = 0;
			goto out;
		}
		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(extent);
		if (found_type == BTRFS_FILE_EXTENT_REG) {
			extent_end = key.offset +
				(btrfs_file_extent_num_blocks(extent) <<
				 inode->i_blkbits);
			found_extent = 1;
		} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
			found_inline = 1;
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf->items + slot);
		}

		/* we found nothing we can drop */
		if (!found_extent && !found_inline) {
			ret = 0;
			goto out;
		}

		/* we found nothing inside the range */
		if (search_start >= extent_end) {
			ret = 0;
			goto out;
		}

		/* FIXME, there's only one inline extent allowed right now */
		if (found_inline) {
			u64 mask = root->blocksize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else {
			search_start = extent_end;
		}

		if (end < extent_end && end >= key.offset) {
			u64 disk_blocknr =
				btrfs_file_extent_disk_blocknr(extent);
			u64 disk_num_blocks =
				btrfs_file_extent_disk_num_blocks(extent);
			memcpy(&old, extent, sizeof(old));
			if (disk_blocknr != 0) {
				ret = btrfs_inc_extent_ref(trans, root,
							   disk_blocknr,
							   disk_num_blocks);
				BUG_ON(ret);
			}
			WARN_ON(found_inline);
			bookend = 1;
		}

		/* truncate existing extent */
		if (start > key.offset) {
			keep = 1;
			WARN_ON(start & (root->blocksize - 1));
			if (found_extent) {
				u64 new_num;
				u64 old_num;

				new_num = (start - key.offset) >>
					inode->i_blkbits;
				old_num = btrfs_file_extent_num_blocks(extent);
				*hint_block =
					btrfs_file_extent_disk_blocknr(extent);
				if (btrfs_file_extent_disk_blocknr(extent)) {
					inode->i_blocks -=
						(old_num - new_num) << 3;
				}
				btrfs_set_file_extent_num_blocks(extent,
								 new_num);
				mark_buffer_dirty(path->nodes[0]);
			}
		}
		if (!keep) {
			/* delete the entire extent */
			u64 disk_blocknr = 0;
			u64 disk_num_blocks = 0;
			u64 extent_num_blocks = 0;

			if (found_extent) {
				disk_blocknr =
					btrfs_file_extent_disk_blocknr(extent);
				disk_num_blocks =
					btrfs_file_extent_disk_num_blocks(extent);
				extent_num_blocks =
					btrfs_file_extent_num_blocks(extent);
				*hint_block =
					btrfs_file_extent_disk_blocknr(extent);
			}
			ret = btrfs_del_item(trans, root, path);
			BUG_ON(ret);
			btrfs_release_path(root, path);

			if (found_extent && disk_blocknr != 0) {
				inode->i_blocks -= extent_num_blocks << 3;
				ret = btrfs_free_extent(trans, root,
							disk_blocknr,
							disk_num_blocks, 0);
				BUG_ON(ret);
			}
		}
		if (!bookend && search_start >= end) {
			ret = 0;
			break;
		}

		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;

			ins.objectid = inode->i_ino;
			ins.offset = end;
			ins.flags = 0;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);

			btrfs_release_path(root, path);
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));
			BUG_ON(ret);
			extent = btrfs_item_ptr(
					btrfs_buffer_leaf(path->nodes[0]),
					path->slots[0],
					struct btrfs_file_extent_item);
			btrfs_set_file_extent_disk_blocknr(extent,
					btrfs_file_extent_disk_blocknr(&old));
			btrfs_set_file_extent_disk_num_blocks(extent,
					btrfs_file_extent_disk_num_blocks(&old));

			btrfs_set_file_extent_offset(extent,
					btrfs_file_extent_offset(&old) +
					((end - key.offset) >> inode->i_blkbits));
			WARN_ON(btrfs_file_extent_num_blocks(&old) <
				(extent_end - end) >> inode->i_blkbits);
			btrfs_set_file_extent_num_blocks(extent,
					(extent_end - end) >> inode->i_blkbits);

			btrfs_set_file_extent_type(extent,
						   BTRFS_FILE_EXTENT_REG);
			btrfs_set_file_extent_generation(extent,
					btrfs_file_extent_generation(&old));
			btrfs_mark_buffer_dirty(path->nodes[0]);
			if (btrfs_file_extent_disk_blocknr(&old) != 0) {
				inode->i_blocks +=
					btrfs_file_extent_num_blocks(extent) << 3;
			}
			ret = 0;
			break;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * this gets pages into the page cache and locks them down
 */
static int prepare_pages(struct btrfs_root *root,
			 struct file *file,
			 struct page **pages,
			 size_t num_pages,
			 loff_t pos,
			 unsigned long first_index,
			 unsigned long last_index,
			 size_t write_bytes,
			 u64 alloc_extent_start)
{
	int i;
	int err = 0;
	size_t this_write;
	size_t offset;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct buffer_head *bh;
	struct buffer_head *head;
	loff_t isize = i_size_read(inode);

	memset(pages, 0, num_pages * sizeof(struct page *));

	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			goto failed_release;
		}
		cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
		wait_on_page_writeback(pages[i]);
		offset = pos & (PAGE_CACHE_SIZE - 1);
		this_write = min((size_t)PAGE_CACHE_SIZE - offset, write_bytes);
		if (!page_has_buffers(pages[i])) {
			create_empty_buffers(pages[i],
					     root->fs_info->sb->s_blocksize,
					     (1 << BH_Uptodate));
		}
		head = page_buffers(pages[i]);
		bh = head;
		do {
			err = btrfs_map_bh_to_logical(root, bh,
						      alloc_extent_start);
			if (err)
				goto failed_truncate;
			bh = bh->b_this_page;
			if (alloc_extent_start)
				alloc_extent_start++;
		} while (bh != head);

		pos += this_write;
		WARN_ON(this_write > write_bytes);
		write_bytes -= this_write;
	}
	return 0;

failed_release:
	btrfs_drop_pages(pages, num_pages);
	return err;

failed_truncate:
	btrfs_drop_pages(pages, num_pages);
	vmtruncate(inode, isize);
	return err;
}
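
/*
 * the main write path: pin the first and last partial pages of the
 * range, drop any old extents, allocate new space (or stay inline for
 * small files), then copy the user data in page-sized chunks
 */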
static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	loff_t pos;
	size_t num_written = 0;
	int err = 0;
	int ret = 0;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page *pages[8];
	struct page *pinned[2];
	unsigned long first_index;
	unsigned long last_index;
	u64 start_pos;
	u64 num_blocks;
	u64 alloc_extent_start;
	u64 hint_block;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;

	pinned[0] = NULL;
	pinned[1] = NULL;
	if (file->f_flags & O_DIRECT)
		return -EINVAL;
	pos = *ppos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;
	if (count == 0)
		goto out;
	err = remove_suid(file->f_path.dentry);
	if (err)
		goto out;
	file_update_time(file);

	start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
	num_blocks = (count + pos - start_pos + root->blocksize - 1) >>
			inode->i_blkbits;

	mutex_lock(&inode->i_mutex);
	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + count) >> PAGE_CACHE_SHIFT;

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = mpage_readpage(pinned[0], btrfs_get_block);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = mpage_readpage(pinned[1], btrfs_get_block);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		err = -ENOMEM;
		mutex_unlock(&root->fs_info->fs_mutex);
		goto out_unlock;
	}
	btrfs_set_trans_block_group(trans, inode);
	/* FIXME blocksize != 4096 */
	inode->i_blocks += num_blocks << 3;
	hint_block = 0;

	/* FIXME...EIEIO, ENOSPC and more */

	/* step one, delete the existing extents in this range */
	if (start_pos < inode->i_size) {
		/* FIXME blocksize != pagesize */
		ret = btrfs_drop_extents(trans, root, inode,
					 start_pos,
					 (pos + count + root->blocksize - 1) &
					 ~((u64)root->blocksize - 1),
					 &hint_block);
		BUG_ON(ret);
	}

	/* insert any holes we need to create */
	if (inode->i_size < start_pos) {
		u64 last_pos_in_file;
		u64 hole_size;
		u64 mask = root->blocksize - 1;
		last_pos_in_file = (inode->i_size + mask) & ~mask;
		hole_size = (start_pos - last_pos_in_file + mask) & ~mask;
		hole_size >>= inode->i_blkbits;
		if (last_pos_in_file < start_pos) {
			ret = btrfs_insert_file_extent(trans, root,
						       inode->i_ino,
						       last_pos_in_file,
						       0, 0, hole_size);
		}
		BUG_ON(ret);
	}

	/*
	 * either allocate an extent for the new bytes or setup the key
	 * to show we are doing inline data in the extent
	 */
	if (inode->i_size >= PAGE_CACHE_SIZE || pos + count < inode->i_size ||
	    pos + count - start_pos > BTRFS_MAX_INLINE_DATA_SIZE(root)) {
		ret = btrfs_alloc_extent(trans, root, inode->i_ino,
					 num_blocks, hint_block, (u64)-1,
					 &ins, 1);
		BUG_ON(ret);
		ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
					       start_pos, ins.objectid,
					       ins.offset, ins.offset);
		BUG_ON(ret);
	} else {
		ins.offset = 0;
		ins.objectid = 0;
	}
	BUG_ON(ret);
	alloc_extent_start = ins.objectid;
	ret = btrfs_end_transaction(trans, root);
	mutex_unlock(&root->fs_info->fs_mutex);

	while (count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count,
					 (size_t)PAGE_CACHE_SIZE - offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;

		memset(pages, 0, sizeof(pages));
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes, alloc_extent_start);
		BUG_ON(ret);

		/* FIXME blocks != pagesize */
		if (alloc_extent_start)
			alloc_extent_start += num_pages;
		ret = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, buf);
		BUG_ON(ret);

		ret = dirty_and_release_pages(NULL, root, file, pages,
					      num_pages, pos, write_bytes);
		BUG_ON(ret);
		btrfs_drop_pages(pages, num_pages);

		buf += write_bytes;
		count -= write_bytes;
		pos += write_bytes;
		num_written += write_bytes;

		balance_dirty_pages_ratelimited(inode->i_mapping);
		btrfs_btree_balance_dirty(root);
		cond_resched();
	}
out_unlock:
	mutex_unlock(&inode->i_mutex);
out:
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;
	current->backing_dev_info = NULL;
	mark_inode_dirty(inode);
	return num_written ? num_written : err;
}
/*
 * FIXME, do this by stuffing the csum we want in the info hanging off
 * page->private.  For now, verify file csums on read
 */
static int btrfs_read_actor(read_descriptor_t *desc, struct page *page,
			    unsigned long offset, unsigned long size)
{
	char *kaddr;
	unsigned long left, count = desc->count;
	struct inode *inode = page->mapping->host;

	if (size > count)
		size = count;

	if (!PageChecked(page)) {
		/* FIXME, do it per block */
		struct btrfs_root *root = BTRFS_I(inode)->root;
		int ret;
		struct buffer_head *bh;

		if (page_has_buffers(page)) {
			bh = page_buffers(page);
			if (!buffer_mapped(bh)) {
				SetPageChecked(page);
				goto checked;
			}
		}

		ret = btrfs_csum_verify_file_block(root,
					page->mapping->host->i_ino,
					page->index << PAGE_CACHE_SHIFT,
					kmap(page), PAGE_CACHE_SIZE);
		if (ret) {
			if (ret != -ENOENT) {
				printk("failed to verify ino %lu page %lu ret %d\n",
				       page->mapping->host->i_ino,
				       page->index, ret);
				memset(page_address(page), 1, PAGE_CACHE_SIZE);
				flush_dcache_page(page);
			}
		}
		SetPageChecked(page);
		kunmap(page);
	}
checked:
	/*
	 * Faults on the destination of a read are common, so do it before
	 * taking the kmap.
	 */
	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
		kaddr = kmap_atomic(page, KM_USER0);
		left = __copy_to_user_inatomic(desc->arg.buf,
					       kaddr + offset, size);
		kunmap_atomic(kaddr, KM_USER0);
		if (left == 0)
			goto success;
	}

	/* Do it the slow way */
	kaddr = kmap(page);
	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
	kunmap(page);

	if (left) {
		size -= left;
		desc->error = -EFAULT;
	}
success:
	desc->count = count - size;
	desc->written += size;
	desc->arg.buf += size;
	return size;
}
/**
 * btrfs_file_aio_read - filesystem read routine, with a mod to csum verify
 * @iocb:	kernel I/O control block
 * @iov:	io vector request
 * @nr_segs:	number of segments in the iovec
 * @pos:	current file position
 */
static ssize_t btrfs_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval = 0;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	count = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		count += iv->iov_len;
		if (unlikely((ssize_t)(count|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		nr_segs = seg;
		count -= iv->iov_len;	/* This segment is no good */
		break;
	}

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_generic_file_read(filp, ppos, &desc,
				     btrfs_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
	}
	return retval;
}
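
/*
 * fsync for btrfs: commit the currently running transaction so that
 * everything this inode dirtied is on disk
 */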
static int btrfs_sync_file(struct file *file,
			   struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;
	struct btrfs_trans_handle *trans;

	/*
	 * FIXME, use inode generation number to check if we can skip the
	 * commit
	 */
	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto out;
	}
	ret = btrfs_commit_transaction(trans, root);
out:
	mutex_unlock(&root->fs_info->fs_mutex);
	return ret > 0 ? EIO : ret;
}
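
/*
 * wire the btrfs read/write/fsync paths into the VFS; reads go through
 * the generic path with our csum-verifying read actor
 */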
struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= btrfs_file_aio_read,
	.write		= btrfs_file_write,
	.mmap		= generic_file_mmap,
	.open		= generic_file_open,
	.ioctl		= btrfs_ioctl,
	.fsync		= btrfs_sync_file,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
};