fs/btrfs/file.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
18
19 #include <linux/fs.h>
20 #include <linux/pagemap.h>
21 #include <linux/highmem.h>
22 #include <linux/time.h>
23 #include <linux/init.h>
24 #include <linux/string.h>
25 #include <linux/smp_lock.h>
26 #include <linux/backing-dev.h>
27 #include <linux/mpage.h>
28 #include <linux/swap.h>
29 #include <linux/writeback.h>
30 #include <linux/statfs.h>
31 #include <linux/compat.h>
32 #include "ctree.h"
33 #include "disk-io.h"
34 #include "transaction.h"
35 #include "btrfs_inode.h"
36 #include "ioctl.h"
37 #include "print-tree.h"
38 #include "tree-log.h"
39 #include "locking.h"
40 #include "compat.h"
41

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
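/*
 * Worked example (editor's illustration, assuming 4K pages): with
 * pos = 5000, offset = 5000 & 4095 = 904, so the first iteration
 * copies min(4096 - 904, write_bytes) bytes into prepared_pages[0];
 * every later page starts at offset 0 and takes a full page or the
 * remaining write_bytes, whichever is smaller.
 */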
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 const char __user *buf)
{
	long page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];
		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		kmap(page);
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);
		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		kunmap(page);
		buf += count;
		write_bytes -= count;

		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		/* PageChecked is some magic that flags pages modified
		 * without going through btrfs_set_page_dirty; clear it
		 * here.
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 hint_byte;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		     root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
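	/*
	 * Worked example (editor's illustration): with sectorsize == 4096,
	 * pos == 5000 and write_bytes == 3000, start_pos rounds down to
	 * 4096 and num_bytes rounds 3904 up to 4096, so end_of_last_block
	 * is 8191 and the write touches exactly one sector.
	 */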

	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	trans = btrfs_join_transaction(root, 1);
	if (!trans) {
		err = -ENOMEM;
		goto out_unlock;
	}
	btrfs_set_trans_block_group(trans, inode);
	hint_byte = 0;

	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);

	/* check for reserved extents on each page, we don't want
	 * to reset the delalloc bit on things that already have
	 * extents reserved.
	 */
	btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		/* we've only changed i_size in RAM, and we haven't updated
		 * the disk i_size.  There is no need to log the inode
		 * at this time.
		 */
	}
	err = btrfs_end_transaction(trans, root);
out_unlock:
	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	return err;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
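/*
 * For example (editor's illustration): dropping [4k, 8k) from a cached
 * mapping that covers [0, 12k) removes the original extent_map and
 * inserts two new ones, [0, 4k) and [8k, 12k), via 'split' and 'split2'
 * below.
 */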
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);

		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			spin_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			spin_unlock(&em_tree->lock);
			if (em->start <= start &&
			    (!testend || em->start + em->len >= start + len)) {
				free_extent_map(em);
				break;
			}
			if (start < em->start) {
				len = em->start - start;
			} else {
				len = start + len - (em->start + em->len);
				start = em->start + em->len;
			}
			free_extent_map(em);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		spin_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a byte number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 *
 * inline_limit is used to tell this code which offsets in the file to keep
 * if they contain inline extents.
 */
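/*
 * Concretely (editor's illustration): if an extent item covers
 * [0, 16k) and the drop range is [4k, 12k), the code below truncates
 * the item to [0, 4k) and inserts a "bookend" item for [12k, 16k)
 * that points at the same disk extent with a larger file offset.
 */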
noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 locked_end,
		       u64 inline_limit, u64 *hint_byte)
{
	u64 extent_end = 0;
	u64 search_start = start;
	u64 ram_bytes = 0;
	u64 disk_bytenr = 0;
	u64 orig_locked_end = locked_end;
	u8 compression;
	u8 encryption;
	u16 other_encoding = 0;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item old;
	int keep;
	int slot;
	int bookend;
	int found_type = 0;
	int found_extent;
	int found_inline;
	int recow;
	int ret;

	inline_limit = 0;
	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while (1) {
		recow = 0;
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}
next_slot:
		keep = 0;
		bookend = 0;
		found_extent = 0;
		found_inline = 0;
		compression = 0;
		encryption = 0;
		extent = NULL;
		leaf = path->nodes[0];
		slot = path->slots[0];
		ret = 0;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
		    key.offset >= end) {
			goto out;
		}
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != inode->i_ino) {
			goto out;
		}
		if (recow) {
			search_start = max(key.offset, start);
			continue;
		}
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			found_type = btrfs_file_extent_type(leaf, extent);
			compression = btrfs_file_extent_compression(leaf,
								    extent);
			encryption = btrfs_file_extent_encryption(leaf,
								  extent);
			other_encoding = btrfs_file_extent_other_encoding(leaf,
								  extent);
			if (found_type == BTRFS_FILE_EXTENT_REG ||
			    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
				extent_end =
				     btrfs_file_extent_disk_bytenr(leaf,
								   extent);
				if (extent_end)
					*hint_byte = extent_end;

				extent_end = key.offset +
				     btrfs_file_extent_num_bytes(leaf, extent);
				ram_bytes = btrfs_file_extent_ram_bytes(leaf,
									extent);
				found_extent = 1;
			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
				found_inline = 1;
				extent_end = key.offset +
				     btrfs_file_extent_inline_len(leaf, extent);
			}
		} else {
			extent_end = search_start;
		}

		/* we found nothing we can drop */
		if ((!found_extent && !found_inline) ||
		    search_start >= extent_end) {
			int nextret;
			u32 nritems;
			nritems = btrfs_header_nritems(leaf);
			if (slot >= nritems - 1) {
				nextret = btrfs_next_leaf(root, path);
				if (nextret)
					goto out;
				recow = 1;
			} else {
				path->slots[0]++;
			}
			goto next_slot;
		}

		if (end <= extent_end && start >= key.offset && found_inline)
			*hint_byte = EXTENT_MAP_INLINE;

		if (found_extent) {
			read_extent_buffer(leaf, &old, (unsigned long)extent,
					   sizeof(old));
		}

		if (end < extent_end && end >= key.offset) {
			bookend = 1;
			if (found_inline && start <= key.offset)
				keep = 1;
		}

		if (bookend && found_extent) {
			if (locked_end < extent_end) {
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
				if (!ret) {
					btrfs_release_path(root, path);
					lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
					locked_end = extent_end;
					continue;
				}
				locked_end = extent_end;
			}
			disk_bytenr = le64_to_cpu(old.disk_bytenr);
			if (disk_bytenr != 0) {
				ret = btrfs_inc_extent_ref(trans, root,
					   disk_bytenr,
					   le64_to_cpu(old.disk_num_bytes), 0,
					   root->root_key.objectid,
					   key.objectid, key.offset -
					   le64_to_cpu(old.offset));
				BUG_ON(ret);
			}
		}

		if (found_inline) {
			u64 mask = root->sectorsize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else
			search_start = extent_end;

		/* truncate existing extent */
		if (start > key.offset) {
			u64 new_num;
			u64 old_num;
			keep = 1;
			WARN_ON(start & (root->sectorsize - 1));
			if (found_extent) {
				new_num = start - key.offset;
				old_num = btrfs_file_extent_num_bytes(leaf,
								      extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
				if (btrfs_file_extent_disk_bytenr(leaf,
								  extent)) {
					inode_sub_bytes(inode, old_num -
							new_num);
				}
				btrfs_set_file_extent_num_bytes(leaf,
							extent, new_num);
				btrfs_mark_buffer_dirty(leaf);
			} else if (key.offset < inline_limit &&
				   (end > extent_end) &&
				   (inline_limit < extent_end)) {
				u32 new_size;
				new_size = btrfs_file_extent_calc_inline_size(
						   inline_limit - key.offset);
				inode_sub_bytes(inode, extent_end -
						inline_limit);
				btrfs_set_file_extent_ram_bytes(leaf, extent,
								new_size);
				if (!compression && !encryption) {
					btrfs_truncate_item(trans, root, path,
							    new_size, 1);
				}
			}
		}
		/* delete the entire extent */
		if (!keep) {
			if (found_inline)
				inode_sub_bytes(inode, extent_end -
						key.offset);
			ret = btrfs_del_item(trans, root, path);
			/* TODO update progress marker and return */
			BUG_ON(ret);
			extent = NULL;
			btrfs_release_path(root, path);
			/* the extent will be freed later */
		}
		if (bookend && found_inline && start <= key.offset) {
			u32 new_size;
			new_size = btrfs_file_extent_calc_inline_size(
						   extent_end - end);
			inode_sub_bytes(inode, end - key.offset);
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							new_size);
			if (!compression && !encryption)
				ret = btrfs_truncate_item(trans, root, path,
							  new_size, 0);
			BUG_ON(ret);
		}
		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;
			ins.objectid = inode->i_ino;
			ins.offset = end;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);

			btrfs_release_path(root, path);
			path->leave_spinning = 1;
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));
			BUG_ON(ret);

			leaf = path->nodes[0];
			extent = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
			write_extent_buffer(leaf, &old,
					    (unsigned long)extent, sizeof(old));

			btrfs_set_file_extent_compression(leaf, extent,
							  compression);
			btrfs_set_file_extent_encryption(leaf, extent,
							 encryption);
			btrfs_set_file_extent_other_encoding(leaf, extent,
							     other_encoding);
			btrfs_set_file_extent_offset(leaf, extent,
				    le64_to_cpu(old.offset) + end - key.offset);
			WARN_ON(le64_to_cpu(old.num_bytes) <
				(extent_end - end));
			btrfs_set_file_extent_num_bytes(leaf, extent,
							extent_end - end);

			/*
			 * set the ram bytes to the size of the full extent
			 * before splitting.  This is a worst case flag,
			 * but it's the best we can do because we don't know
			 * how splitting affects compression
			 */
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							ram_bytes);
			btrfs_set_file_extent_type(leaf, extent, found_type);

			btrfs_unlock_up_safe(path, 1);
			btrfs_mark_buffer_dirty(path->nodes[0]);
			btrfs_set_lock_blocking(path->nodes[0]);

			path->leave_spinning = 0;
			btrfs_release_path(root, path);
			if (disk_bytenr != 0)
				inode_add_bytes(inode, extent_end - end);
		}

		if (found_extent && !keep) {
			u64 old_disk_bytenr = le64_to_cpu(old.disk_bytenr);

			if (old_disk_bytenr != 0) {
				inode_sub_bytes(inode,
						le64_to_cpu(old.num_bytes));
				ret = btrfs_free_extent(trans, root,
						old_disk_bytenr,
						le64_to_cpu(old.disk_num_bytes),
						0, root->root_key.objectid,
						key.objectid, key.offset -
						le64_to_cpu(old.offset));
				BUG_ON(ret);
				*hint_byte = old_disk_bytenr;
			}
		}

		if (search_start >= end) {
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	if (locked_end > orig_locked_end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, orig_locked_end,
			      locked_end - 1, GFP_NOFS);
	}
	return ret;
}

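/*
 * Editor's note -- comment added for clarity: returns 1 if the item at
 * 'slot' is an uncompressed, unencrypted REG file extent backed by
 * 'bytenr' whose bounds agree with any nonzero *start / *end the caller
 * passed in; on success *start and *end are set to the extent's
 * [key.offset, extent_end) bounds so the caller can merge with it.
 */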
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'.  If only
 * part of the extent is marked as written, the extent will be split into
 * two or three.
 */
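/*
 * E.g. (editor's illustration): marking [4k, 8k) written inside a
 * preallocated extent [0, 16k) leaves three items -- prealloc [0, 4k),
 * regular [4k, 8k) and prealloc [8k, 16k) -- all referencing the same
 * disk extent, with the extra references taken via btrfs_inc_extent_ref.
 */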
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct inode *inode, u64 start, u64 end)
{
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split = start;
	u64 locked_end = end;
	int extent_type;
	int split_end = 1;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	key.objectid = inode->i_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	if (split == start)
		key.offset = split;
	else
		key.offset = split - 1;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != inode->i_ino ||
	       key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(leaf, fi);
	BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);

	if (key.offset == start)
		split = end;

	if (key.offset == start && extent_end == end) {
		int del_nr = 0;
		int del_slot = 0;
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			extent_end = other_end;
			del_slot = path->slots[0] + 1;
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						0, root->root_key.objectid,
						inode->i_ino, orig_offset);
			BUG_ON(ret);
		}
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			key.offset = other_start;
			del_slot = path->slots[0];
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						0, root->root_key.objectid,
						inode->i_ino, orig_offset);
			BUG_ON(ret);
		}
		split_end = 0;
		if (del_nr == 0) {
			btrfs_set_file_extent_type(leaf, fi,
						   BTRFS_FILE_EXTENT_REG);
			goto done;
		}

		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
		goto release;
	} else if (split == start) {
		if (locked_end < extent_end) {
			ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
					locked_end, extent_end - 1, GFP_NOFS);
			if (!ret) {
				btrfs_release_path(root, path);
				lock_extent(&BTRFS_I(inode)->io_tree,
					locked_end, extent_end - 1, GFP_NOFS);
				locked_end = extent_end;
				goto again;
			}
			locked_end = extent_end;
		}
		btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset);
	} else {
		BUG_ON(key.offset != start);
		key.offset = split;
		btrfs_set_file_extent_offset(leaf, fi, key.offset -
					     orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split);
		btrfs_set_item_key_safe(trans, root, path, &key);
		extent_end = split;
	}

	if (extent_end == end) {
		split_end = 0;
		extent_type = BTRFS_FILE_EXTENT_REG;
	}
	if (extent_end == end && split == start) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			path->slots[0]++;
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			key.offset = split;
			btrfs_set_item_key_safe(trans, root, path, &key);
			btrfs_set_file_extent_offset(leaf, fi, key.offset -
						     orig_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - split);
			goto done;
		}
	}
	if (extent_end == end && split == end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			path->slots[0]--;
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi, extent_end -
							other_start);
			goto done;
		}
	}

	btrfs_mark_buffer_dirty(leaf);

	ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
				   root->root_key.objectid,
				   inode->i_ino, orig_offset);
	BUG_ON(ret);
	btrfs_release_path(root, path);

	key.offset = start;
	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi));
	BUG_ON(ret);

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, key.offset - orig_offset);
	btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset);
	btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_compression(leaf, fi, 0);
	btrfs_set_file_extent_encryption(leaf, fi, 0);
	btrfs_set_file_extent_other_encoding(leaf, fi, 0);
done:
	btrfs_mark_buffer_dirty(leaf);

release:
	btrfs_release_path(root, path);
	if (split_end && split == start) {
		split = end;
		goto again;
	}
	if (locked_end > end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
			      GFP_NOFS);
	}
	btrfs_free_path(path);
	return 0;
}

/*
 * this gets pages into the page cache and locks them down; it also
 * properly waits for data=ordered extents to finish before allowing
 * the pages to be modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
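	/*
	 * Worked example (editor's illustration, 4K pages and sectors):
	 * pos == 5000 with num_pages == 2 gives index == 1,
	 * start_pos == 4096 and last_pos == (1 + 2) << 12 == 12288,
	 * the byte just past the last page being prepared.
	 */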

	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, start_pos);
		if (err)
			return err;
	}

	memset(pages, 0, num_pages * sizeof(struct page *));
again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			BUG_ON(1);
		}
		wait_on_page_writeback(pages[i]);
	}
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(inode)->io_tree,
			    start_pos, last_pos - 1, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      start_pos, last_pos - 1, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
				  GFP_NOFS);
		unlock_extent(&BTRFS_I(inode)->io_tree,
			      start_pos, last_pos - 1, GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}

static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	loff_t pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	int ret = 0;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	int nrptrs;
	struct page *pinned[2];
	unsigned long first_index;
	unsigned long last_index;
	int will_write;

	will_write = ((file->f_flags & O_SYNC) || IS_SYNC(inode) ||
		      (file->f_flags & O_DIRECT));

	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
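	/*
	 * Editor's illustration: nrptrs caps how many page pointers the
	 * write loop handles per pass.  With 4K pages on 64-bit, a 1MB
	 * write needs 256 pages and one page holds 4096 / 8 = 512
	 * pointers, so nrptrs = min(256, 512) = 256.
	 */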
	pinned[0] = NULL;
	pinned[1] = NULL;

	pos = *ppos;
	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out_nolock;
	if (count == 0)
		goto out_nolock;

	err = file_remove_suid(file);
	if (err)
		goto out_nolock;
	file_update_time(file);

	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);

	mutex_lock(&inode->i_mutex);
	BTRFS_I(inode)->sequence++;
	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + count) >> PAGE_CACHE_SHIFT;

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count, nrptrs *
					 (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;
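		/*
		 * Editor's illustration: each pass is capped at nrptrs
		 * pages' worth of data, less the offset into the first
		 * page.  With 4K pages, nrptrs == 256 and pos == 5000,
		 * offset is 904 and at most 256 * 4096 - 904 bytes are
		 * handled before buf, pos and count advance and the
		 * loop repeats.
		 */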

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, sizeof(struct page *) * nrptrs);

		ret = btrfs_check_data_free_space(root, inode, write_bytes);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			goto out;
		}

		ret = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, buf);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			btrfs_drop_pages(pages, num_pages);
			goto out;
		}

		ret = dirty_and_release_pages(NULL, root, file, pages,
					      num_pages, pos, write_bytes);
		btrfs_drop_pages(pages, num_pages);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			goto out;
		}

		if (will_write) {
			btrfs_fdatawrite_range(inode->i_mapping, pos,
					       pos + write_bytes - 1,
					       WB_SYNC_ALL);
		} else {
			balance_dirty_pages_ratelimited_nr(inode->i_mapping,
							   num_pages);
			if (num_pages <
			    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
				btrfs_btree_balance_dirty(root, 1);
			btrfs_throttle(root);
		}

		buf += write_bytes;
		count -= write_bytes;
		pos += write_bytes;
		num_written += write_bytes;

		cond_resched();
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (ret)
		err = ret;

out_nolock:
	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;

	if (num_written > 0 && will_write) {
		struct btrfs_trans_handle *trans;

		err = btrfs_wait_ordered_range(inode, start_pos, num_written);
		if (err)
			num_written = err;

		if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
			trans = btrfs_start_transaction(root, 1);
			ret = btrfs_log_dentry_safe(trans, root,
						    file->f_dentry);
			if (ret == 0) {
				ret = btrfs_sync_log(trans, root);
				if (ret == 0)
					btrfs_end_transaction(trans, root);
				else
					btrfs_commit_transaction(trans, root);
			} else {
				btrfs_commit_transaction(trans, root);
			}
		}
		if (file->f_flags & O_DIRECT) {
			invalidate_mapping_pages(inode->i_mapping,
			      start_pos >> PAGE_CACHE_SHIFT,
			     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
		}
	}
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	root->log_batch++;
	filemap_fdatawrite(inode->i_mapping);
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->log_batch++;

	if (datasync && !(inode->i_state & I_DIRTY_PAGES))
		goto out;
	/*
	 * ok, we haven't committed the transaction yet; let's do a commit
	 */
	if (file && file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0)
		goto out;

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&dentry->d_inode->i_mutex);

	if (ret > 0) {
		ret = btrfs_commit_transaction(trans, root);
	} else {
		ret = btrfs_sync_log(trans, root);
		if (ret == 0)
			ret = btrfs_end_transaction(trans, root);
		else
			ret = btrfs_commit_transaction(trans, root);
	}
	mutex_lock(&dentry->d_inode->i_mutex);
out:
	return ret > 0 ? -EIO : ret;
}

static struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &btrfs_file_vm_ops;
	file_accessed(filp);
	return 0;
}

struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.write		= btrfs_file_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};