Btrfs: Add debugging checks to track down corrupted metadata
fs/btrfs/file.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/version.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "compat.h"

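/*
 * Copy data from userspace into the locked, prepared pages.  The first
 * page is written starting at the offset of 'pos' within a page; later
 * pages are filled from offset zero.  Returns -EFAULT if any of the
 * copies fault, otherwise zero.
 */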
static int noinline btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 const char __user * buf)
{
	long page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];
		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		kmap(page);
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);
		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		kunmap(page);
		buf += count;
		write_bytes -= count;

		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}

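/*
 * Unlock and release the pages used for a write, stopping at the first
 * NULL entry in the array.  The Checked flag is cleared before each page
 * is unlocked, and mark_page_accessed() keeps recently written pages on
 * the active LRU.
 */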
static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

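/*
 * Store file data directly in the btree as an inline extent.  If an
 * inline extent already covers part of [offset, offset + size) the
 * existing item is extended (and any gap between its old end and
 * 'offset' is zeroed), otherwise a new inline item is inserted.  The
 * data itself is copied from the prepared pages into the leaf.
 */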
static int noinline insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 offset, size_t size,
				struct page **pages, size_t page_offset,
				int num_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct page *page;
	u32 datasize;
	int err = 0;
	int ret;
	int i;
	ssize_t cur_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	key.offset = offset;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}
	if (ret == 1) {
		struct btrfs_key found_key;

		if (path->slots[0] == 0)
			goto insert;

		path->slots[0]--;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid != inode->i_ino)
			goto insert;

		if (found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto insert;
		ei = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, ei) !=
		    BTRFS_FILE_EXTENT_INLINE) {
			goto insert;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		ret = 0;
	}
	if (ret == 0) {
		u32 found_size;
		u64 found_end;

		leaf = path->nodes[0];
		ei = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, ei) !=
		    BTRFS_FILE_EXTENT_INLINE) {
			err = ret;
			btrfs_print_leaf(root, leaf);
			printk("found wasn't inline offset %Lu inode %lu\n",
			       offset, inode->i_ino);
			goto fail;
		}
		found_size = btrfs_file_extent_inline_len(leaf,
				  btrfs_item_nr(leaf, path->slots[0]));
		found_end = key.offset + found_size;

		if (found_end < offset + size) {
			btrfs_release_path(root, path);
			ret = btrfs_search_slot(trans, root, &key, path,
						offset + size - found_end, 1);
			BUG_ON(ret != 0);

			ret = btrfs_extend_item(trans, root, path,
						offset + size - found_end);
			if (ret) {
				err = ret;
				goto fail;
			}
			leaf = path->nodes[0];
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			inode->i_blocks += (offset + size - found_end) >> 9;
		}
		if (found_end < offset) {
			ptr = btrfs_file_extent_inline_start(ei) + found_size;
			memset_extent_buffer(leaf, 0, ptr, offset - found_end);
		}
	} else {
insert:
		btrfs_release_path(root, path);
		datasize = offset + size - key.offset;
		inode->i_blocks += datasize >> 9;
		datasize = btrfs_file_extent_calc_inline_size(datasize);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret) {
			err = ret;
			printk("got bad ret %d\n", ret);
			goto fail;
		}
		leaf = path->nodes[0];
		ei = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, ei, trans->transid);
		btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	}
	ptr = btrfs_file_extent_inline_start(ei) + offset - key.offset;

	cur_size = size;
	i = 0;
	while (size > 0) {
		page = pages[i];
		kaddr = kmap_atomic(page, KM_USER0);
		cur_size = min_t(size_t, PAGE_CACHE_SIZE - page_offset, size);
		write_extent_buffer(leaf, kaddr + page_offset, ptr, cur_size);
		kunmap_atomic(kaddr, KM_USER0);
		page_offset = 0;
		ptr += cur_size;
		size -= cur_size;
		if (i >= num_pages) {
			printk("i %d num_pages %d\n", i, num_pages);
		}
		i++;
	}
	btrfs_mark_buffer_dirty(leaf);
fail:
	btrfs_free_path(path);
	return err;
}

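/*
 * After data has been copied into the prepared pages, record it in the
 * btree.  Writes that fit below the configured inline limits are pushed
 * directly into an inline extent; everything else is marked delalloc so
 * real extents get allocated at writeback time.  Any gap between the old
 * i_size and the start of the write is filled with an explicit hole
 * extent (disk_bytenr 0) first.
 */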
static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 hint_byte;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	u64 inline_size;
	int did_inline = 0;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;

	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	trans = btrfs_join_transaction(root, 1);
	if (!trans) {
		err = -ENOMEM;
		goto out_unlock;
	}
	btrfs_set_trans_block_group(trans, inode);
	hint_byte = 0;

	if ((end_of_last_block & 4095) == 0) {
		printk("strange end of last %Lu %zu %Lu\n", start_pos,
		       write_bytes, end_of_last_block);
	}
	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);

	/* FIXME...EIEIO, ENOSPC and more */
	/* insert any holes we need to create */
	if (isize < start_pos) {
		u64 last_pos_in_file;
		u64 hole_size;
		u64 mask = root->sectorsize - 1;
		last_pos_in_file = (isize + mask) & ~mask;
		hole_size = (start_pos - last_pos_in_file + mask) & ~mask;
		if (hole_size > 0) {
			btrfs_wait_ordered_range(inode, last_pos_in_file,
						 last_pos_in_file + hole_size);
			mutex_lock(&BTRFS_I(inode)->extent_mutex);
			err = btrfs_drop_extents(trans, root, inode,
						 last_pos_in_file,
						 last_pos_in_file + hole_size,
						 last_pos_in_file,
						 &hint_byte);
			if (err)
				goto failed;

			err = btrfs_insert_file_extent(trans, root,
						       inode->i_ino,
						       last_pos_in_file,
						       0, 0, hole_size, 0);
			btrfs_drop_extent_cache(inode, last_pos_in_file,
					last_pos_in_file + hole_size - 1);
			mutex_unlock(&BTRFS_I(inode)->extent_mutex);
			btrfs_check_file(root, inode);
		}
		if (err)
			goto failed;
	}

	/*
	 * either allocate an extent for the new bytes or setup the key
	 * to show we are doing inline data in the extent
	 */
	inline_size = end_pos;
	if (isize >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    inline_size > root->fs_info->max_inline ||
	    (inline_size & (root->sectorsize - 1)) == 0 ||
	    inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) {
		/* check for reserved extents on each page, we don't want
		 * to reset the delalloc bit on things that already have
		 * extents reserved.
		 */
		btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
		for (i = 0; i < num_pages; i++) {
			struct page *p = pages[i];
			SetPageUptodate(p);
			ClearPageChecked(p);
			set_page_dirty(p);
		}
	} else {
		u64 aligned_end;
		/* step one, delete the existing extents in this range */
		aligned_end = (pos + write_bytes + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
		mutex_lock(&BTRFS_I(inode)->extent_mutex);
		err = btrfs_drop_extents(trans, root, inode, start_pos,
					 aligned_end, aligned_end, &hint_byte);
		if (err)
			goto failed;
		if (isize > inline_size)
			inline_size = min_t(u64, isize, aligned_end);
		inline_size -= start_pos;
		err = insert_inline_extent(trans, root, inode, start_pos,
					   inline_size, pages, 0, num_pages);
		btrfs_drop_extent_cache(inode, start_pos, aligned_end - 1);
		BUG_ON(err);
		mutex_unlock(&BTRFS_I(inode)->extent_mutex);

		/*
		 * an ugly way to do all the prop accounting around
		 * the page bits and mapping tags
		 */
		set_page_writeback(pages[0]);
		end_page_writeback(pages[0]);
		did_inline = 1;
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		if (did_inline)
			BTRFS_I(inode)->disk_i_size = end_pos;
		btrfs_update_inode(trans, root, inode);
	}
failed:
	err = btrfs_end_transaction(trans, root);
out_unlock:
	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	return err;
}

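/*
 * Drop any cached extent_map records overlapping [start, end] from the
 * in-memory extent tree.  For mappings that point at real disk extents
 * and straddle either edge of the range, the portions outside the range
 * are re-inserted as new (split) extent_maps so lookups outside the
 * range still work.  An end of (u64)-1 means "to the end of the file".
 */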
int noinline btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while(1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);

		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			spin_unlock(&em_tree->lock);
			break;
		}
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->block_start = em->block_start;
			split->bdev = em->bdev;
			split->flags = em->flags;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = em->flags;

			split->block_start = em->block_start + diff;

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		spin_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}

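/*
 * Debugging helper: walk the file extent items for this inode and make
 * sure their offsets are increasing, with no gaps going backwards.  The
 * body is currently compiled out with #if 0 and the function simply
 * returns 0; flip the #if to re-enable the checks while chasing
 * metadata corruption.
 */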
int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
{
	return 0;
#if 0
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	u64 last_offset = 0;
	int nritems;
	int slot;
	int found_type;
	int ret;
	int err = 0;
	u64 extent_end = 0;

	path = btrfs_alloc_path();
	ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
				       last_offset, 0);
	while(1) {
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				goto out;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid != inode->i_ino)
			break;
		if (found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto out;

		if (found_key.offset < last_offset) {
			WARN_ON(1);
			btrfs_print_leaf(root, leaf);
			printk("inode %lu found offset %Lu expected %Lu\n",
			       inode->i_ino, found_key.offset, last_offset);
			err = 1;
			goto out;
		}
		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(leaf, extent);
		if (found_type == BTRFS_FILE_EXTENT_REG) {
			extent_end = found_key.offset +
			     btrfs_file_extent_num_bytes(leaf, extent);
		} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
			struct btrfs_item *item;
			item = btrfs_item_nr(leaf, slot);
			extent_end = found_key.offset +
			     btrfs_file_extent_inline_len(leaf, item);
			extent_end = (extent_end + root->sectorsize - 1) &
				~((u64)root->sectorsize - 1);
		}
		last_offset = extent_end;
		path->slots[0]++;
	}
	if (0 && last_offset < inode->i_size) {
		WARN_ON(1);
		btrfs_print_leaf(root, leaf);
		printk("inode %lu found offset %Lu size %Lu\n", inode->i_ino,
		       last_offset, inode->i_size);
		err = 1;
	}
out:
	btrfs_free_path(path);
	return err;
#endif
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a byte number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
{
	u64 extent_end = 0;
	u64 search_start = start;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item old;
	int keep;
	int slot;
	int bookend;
	int found_type;
	int found_extent;
	int found_inline;
	int recow;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while(1) {
		recow = 0;
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}
next_slot:
		keep = 0;
		bookend = 0;
		found_extent = 0;
		found_inline = 0;
		extent = NULL;
		leaf = path->nodes[0];
		slot = path->slots[0];
		ret = 0;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
		    key.offset >= end) {
			goto out;
		}
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != inode->i_ino) {
			goto out;
		}
		if (recow) {
			search_start = key.offset;
			continue;
		}
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			found_type = btrfs_file_extent_type(leaf, extent);
			if (found_type == BTRFS_FILE_EXTENT_REG) {
				extent_end =
				     btrfs_file_extent_disk_bytenr(leaf,
								   extent);
				if (extent_end)
					*hint_byte = extent_end;

				extent_end = key.offset +
				     btrfs_file_extent_num_bytes(leaf, extent);
				found_extent = 1;
			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
				struct btrfs_item *item;
				item = btrfs_item_nr(leaf, slot);
				found_inline = 1;
				extent_end = key.offset +
				     btrfs_file_extent_inline_len(leaf, item);
			}
		} else {
			extent_end = search_start;
		}

		/* we found nothing we can drop */
		if ((!found_extent && !found_inline) ||
		    search_start >= extent_end) {
			int nextret;
			u32 nritems;
			nritems = btrfs_header_nritems(leaf);
			if (slot >= nritems - 1) {
				nextret = btrfs_next_leaf(root, path);
				if (nextret)
					goto out;
				recow = 1;
			} else {
				path->slots[0]++;
			}
			goto next_slot;
		}

		if (found_inline) {
			u64 mask = root->sectorsize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else
			search_start = extent_end;
		if (end <= extent_end && start >= key.offset && found_inline) {
			*hint_byte = EXTENT_MAP_INLINE;
			continue;
		}
		if (end < extent_end && end >= key.offset) {
			if (found_extent) {
				u64 disk_bytenr =
				    btrfs_file_extent_disk_bytenr(leaf, extent);
				u64 disk_num_bytes =
				    btrfs_file_extent_disk_num_bytes(leaf,
								     extent);
				read_extent_buffer(leaf, &old,
						   (unsigned long)extent,
						   sizeof(old));
				if (disk_bytenr != 0) {
					ret = btrfs_inc_extent_ref(trans, root,
						 disk_bytenr, disk_num_bytes,
						 root->root_key.objectid,
						 trans->transid,
						 key.objectid, end);
					BUG_ON(ret);
				}
			}
			bookend = 1;
			if (found_inline && start <= key.offset)
				keep = 1;
		}
		/* truncate existing extent */
		if (start > key.offset) {
			u64 new_num;
			u64 old_num;
			keep = 1;
			WARN_ON(start & (root->sectorsize - 1));
			if (found_extent) {
				new_num = start - key.offset;
				old_num = btrfs_file_extent_num_bytes(leaf,
								      extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
				if (btrfs_file_extent_disk_bytenr(leaf,
								  extent)) {
					dec_i_blocks(inode, old_num - new_num);
				}
				btrfs_set_file_extent_num_bytes(leaf, extent,
								new_num);
				btrfs_mark_buffer_dirty(leaf);
			} else if (key.offset < inline_limit &&
				   (end > extent_end) &&
				   (inline_limit < extent_end)) {
				u32 new_size;
				new_size = btrfs_file_extent_calc_inline_size(
						   inline_limit - key.offset);
				dec_i_blocks(inode, (extent_end - key.offset) -
					(inline_limit - key.offset));
				btrfs_truncate_item(trans, root, path,
						    new_size, 1);
			}
		}
		/* delete the entire extent */
		if (!keep) {
			u64 disk_bytenr = 0;
			u64 disk_num_bytes = 0;
			u64 extent_num_bytes = 0;
			u64 root_gen;
			u64 root_owner;

			root_gen = btrfs_header_generation(leaf);
			root_owner = btrfs_header_owner(leaf);
			if (found_extent) {
				disk_bytenr =
				      btrfs_file_extent_disk_bytenr(leaf,
								    extent);
				disk_num_bytes =
				      btrfs_file_extent_disk_num_bytes(leaf,
								       extent);
				extent_num_bytes =
				      btrfs_file_extent_num_bytes(leaf, extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
			}
			ret = btrfs_del_item(trans, root, path);
			/* TODO update progress marker and return */
			BUG_ON(ret);
			btrfs_release_path(root, path);
			extent = NULL;
			if (found_extent && disk_bytenr != 0) {
				dec_i_blocks(inode, extent_num_bytes);
				ret = btrfs_free_extent(trans, root,
							disk_bytenr,
							disk_num_bytes,
							root_owner,
							root_gen, inode->i_ino,
							key.offset, 0);
			}

			BUG_ON(ret);
			if (!bookend && search_start >= end) {
				ret = 0;
				goto out;
			}
			if (!bookend)
				continue;
		}
		if (bookend && found_inline && start <= key.offset) {
			u32 new_size;
			new_size = btrfs_file_extent_calc_inline_size(
						   extent_end - end);
			dec_i_blocks(inode, (extent_end - key.offset) -
					(extent_end - end));
			btrfs_truncate_item(trans, root, path, new_size, 0);
		}
		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;
			ins.objectid = inode->i_ino;
			ins.offset = end;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
			btrfs_release_path(root, path);
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));

			leaf = path->nodes[0];
			if (ret) {
				btrfs_print_leaf(root, leaf);
				printk("got %d on inserting %Lu %u %Lu start %Lu end %Lu found %Lu %Lu keep was %d\n", ret, ins.objectid, ins.type, ins.offset, start, end, key.offset, extent_end, keep);
			}
			BUG_ON(ret);
			extent = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
			write_extent_buffer(leaf, &old,
					    (unsigned long)extent, sizeof(old));

			btrfs_set_file_extent_offset(leaf, extent,
				    le64_to_cpu(old.offset) + end - key.offset);
			WARN_ON(le64_to_cpu(old.num_bytes) <
				(extent_end - end));
			btrfs_set_file_extent_num_bytes(leaf, extent,
							extent_end - end);
			btrfs_set_file_extent_type(leaf, extent,
						   BTRFS_FILE_EXTENT_REG);

			btrfs_mark_buffer_dirty(path->nodes[0]);
			if (le64_to_cpu(old.disk_bytenr) != 0) {
				inode->i_blocks +=
				      btrfs_file_extent_num_bytes(leaf,
								  extent) >> 9;
			}
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	btrfs_check_file(root, inode);
	return ret;
}

/*
 * this gets pages into the page cache and locks them down
 */
static int noinline prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	memset(pages, 0, num_pages * sizeof(struct page *));
again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			BUG_ON(1);
		}
		wait_on_page_writeback(pages[i]);
	}
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(inode)->io_tree,
			    start_pos, last_pos - 1, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode, last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      start_pos, last_pos - 1, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
				  GFP_NOFS);
		unlock_extent(&BTRFS_I(inode)->io_tree,
			      start_pos, last_pos - 1, GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}

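/*
 * write(2) entry point.  The write is broken into batches of at most
 * nrptrs pages: for each batch we check free space, lock and prepare
 * the pages, copy the user data in, and hand the pages to
 * dirty_and_release_pages() to be recorded in the btree.  O_SYNC and
 * O_DIRECT writers get the written range synced before return.
 */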
static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	loff_t pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	int ret = 0;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	int nrptrs;
	struct page *pinned[2];
	unsigned long first_index;
	unsigned long last_index;

	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
	pinned[0] = NULL;
	pinned[1] = NULL;

	pos = *ppos;
	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out_nolock;
	if (count == 0)
		goto out_nolock;
#ifdef REMOVE_SUID_PATH
	err = remove_suid(&file->f_path);
#else
# if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
	err = file_remove_suid(file);
# else
	err = remove_suid(fdentry(file));
# endif
#endif
	if (err)
		goto out_nolock;
	file_update_time(file);

	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);

	mutex_lock(&inode->i_mutex);
	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + count) >> PAGE_CACHE_SHIFT;

	/*
	 * if this is a nodatasum mount, force summing off for the inode
	 * all the time.  That way a later mount with summing on won't
	 * get confused
	 */
	if (btrfs_test_opt(root, NODATASUM))
		btrfs_set_flag(inode, NODATASUM);

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

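	/*
	 * Main copy loop: each pass handles up to nrptrs pages worth of
	 * data.  Free space is checked up front, the pages are prepared
	 * and locked, the user buffer is copied in, and the batch is then
	 * dirtied and released so writeback (or the inline path) can
	 * persist it.  Dirty btree pages are balanced as we go to keep
	 * memory pressure under control.
	 */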
	while(count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count, nrptrs *
					 (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, sizeof(pages));

		ret = btrfs_check_free_space(root, write_bytes, 0);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret)
			goto out;

		ret = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, buf);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			goto out;
		}

		ret = dirty_and_release_pages(NULL, root, file, pages,
					      num_pages, pos, write_bytes);
		btrfs_drop_pages(pages, num_pages);
		if (ret)
			goto out;

		buf += write_bytes;
		count -= write_bytes;
		pos += write_bytes;
		num_written += write_bytes;

		balance_dirty_pages_ratelimited_nr(inode->i_mapping, num_pages);
		if (num_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root, 1);
		btrfs_throttle(root);
		cond_resched();
	}
out:
	mutex_unlock(&inode->i_mutex);

out_nolock:
	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;

	if (num_written > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		err = sync_page_range(inode, inode->i_mapping,
				      start_pos, num_written);
		if (err < 0)
			num_written = err;
	} else if (num_written > 0 && (file->f_flags & O_DIRECT)) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
		do_sync_file_range(file, start_pos,
				   start_pos + num_written - 1,
				   SYNC_FILE_RANGE_WRITE |
				   SYNC_FILE_RANGE_WAIT_AFTER);
#else
		do_sync_mapping_range(inode->i_mapping, start_pos,
				      start_pos + num_written - 1,
				      SYNC_FILE_RANGE_WRITE |
				      SYNC_FILE_RANGE_WAIT_AFTER);
#endif
		invalidate_mapping_pages(inode->i_mapping,
		      start_pos >> PAGE_CACHE_SHIFT,
		     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
	}
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

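/*
 * Called on the final fput of the struct file.  If a userspace
 * transaction (started via the ioctl interface) is still open against
 * this file, end it here so it is not leaked.
 */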
int btrfs_release_file(struct inode * inode, struct file * filp)
{
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

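/*
 * fsync/fdatasync: if the transaction that last touched this inode has
 * already committed there is nothing to do; otherwise commit the
 * current transaction so the inode and its data references reach disk.
 */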
static int btrfs_sync_file(struct file *file,
			   struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	/*
	 * ok we haven't committed the transaction yet, let's do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto out;
	}
	ret = btrfs_commit_transaction(trans, root);
out:
	return ret > 0 ? -EIO : ret;
}

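/*
 * mmap support.  Faults are served by the generic filemap handlers;
 * writes to shared mappings go through btrfs_page_mkwrite so the
 * filesystem can prepare the page (delalloc/COW bookkeeping) before it
 * is dirtied.  The #if picks the right callbacks for older kernels.
 */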
static struct vm_operations_struct btrfs_file_vm_ops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
	.nopage		= filemap_nopage,
	.populate	= filemap_populate,
#else
	.fault		= filemap_fault,
#endif
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &btrfs_file_vm_ops;
	file_accessed(filp);
	return 0;
}

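/*
 * File operations for btrfs regular files: reads use the generic paths,
 * writes go through btrfs_file_write above, and fsync maps to
 * btrfs_sync_file.
 */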
struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
	.sendfile	= generic_file_sendfile,
#endif
	.write		= btrfs_file_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};