/*
 * Copyright (c) 2008,2009 NEC Software Tohoku, Ltd.
 * Written by Takashi Sato <t-sato@yk.jp.nec.com>
 *            Akira Fujita <a-fujita@rs.jp.nec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
17 #include <linux/quotaops.h>
18 #include <linux/slab.h>
19 #include "ext4_jbd2.h"
21 #include "ext4_extents.h"
24 * get_ext_path - Find an extent path for designated logical block number.
26 * @inode: an inode which is searched
27 * @lblock: logical block number to find an extent path
28 * @path: pointer to an extent path pointer (for output)
30 * ext4_ext_find_extent wrapper. Return 0 on success, or a negative error value
34 get_ext_path(struct inode
*inode
, ext4_lblk_t lblock
,
35 struct ext4_ext_path
**orig_path
)
38 struct ext4_ext_path
*path
;
40 path
= ext4_ext_find_extent(inode
, lblock
, orig_path
, EXT4_EX_NOCACHE
);
43 else if (path
[ext_depth(inode
)].p_ext
== NULL
)
52 * ext4_double_down_write_data_sem - Acquire two inodes' write lock
55 * Acquire write lock of i_data_sem of the two inodes
58 ext4_double_down_write_data_sem(struct inode
*first
, struct inode
*second
)
61 down_write(&EXT4_I(first
)->i_data_sem
);
62 down_write_nested(&EXT4_I(second
)->i_data_sem
, SINGLE_DEPTH_NESTING
);
64 down_write(&EXT4_I(second
)->i_data_sem
);
65 down_write_nested(&EXT4_I(first
)->i_data_sem
, SINGLE_DEPTH_NESTING
);
71 * ext4_double_up_write_data_sem - Release two inodes' write lock of i_data_sem
73 * @orig_inode: original inode structure to be released its lock first
74 * @donor_inode: donor inode structure to be released its lock second
75 * Release write lock of i_data_sem of two inodes (orig and donor).
78 ext4_double_up_write_data_sem(struct inode
*orig_inode
,
79 struct inode
*donor_inode
)
81 up_write(&EXT4_I(orig_inode
)->i_data_sem
);
82 up_write(&EXT4_I(donor_inode
)->i_data_sem
);
86 * mext_check_coverage - Check that all extents in range has the same type
88 * @inode: inode in question
89 * @from: block offset of inode
90 * @count: block count to be checked
91 * @unwritten: extents expected to be unwritten
92 * @err: pointer to save error value
94 * Return 1 if all extents in range has expected type, and zero otherwise.
97 mext_check_coverage(struct inode
*inode
, ext4_lblk_t from
, ext4_lblk_t count
,
98 int unwritten
, int *err
)
100 struct ext4_ext_path
*path
= NULL
;
101 struct ext4_extent
*ext
;
103 ext4_lblk_t last
= from
+ count
;
104 while (from
< last
) {
105 *err
= get_ext_path(inode
, from
, &path
);
108 ext
= path
[ext_depth(inode
)].p_ext
;
109 if (unwritten
!= ext4_ext_is_unwritten(ext
))
111 from
+= ext4_ext_get_actual_len(ext
);
112 ext4_ext_drop_refs(path
);
116 ext4_ext_drop_refs(path
);
/*
 * mext_replace_branches - Replace original extents with new extents
 *
 * @handle:		journal handle
 * @orig_inode:		original inode
 * @donor_inode:	donor inode
 * @from:		block offset of orig_inode
 * @count:		block count to be replaced
 * @err:		pointer to save return value
 *
 * Replace original inode extents and donor inode extents page by page.
 * We implement this replacement in the following three steps:
 * 1. Save the block information of original and donor inodes into
 *    dummy extents.
 * 2. Change the block information of original inode to point at the
 *    donor inode blocks.
 * 3. Change the block information of donor inode to point at the saved
 *    original inode blocks in the dummy extents.
 *
 * Return replaced block count.
 *
 * NOTE(review): no function of this name appears in this file chunk; the
 * extent exchange is performed by ext4_swap_extents() instead.  This
 * comment looks stale — confirm against the full file before removing it.
 */
144 * mext_page_double_lock - Grab and lock pages on both @inode1 and @inode2
146 * @inode1: the inode structure
147 * @inode2: the inode structure
149 * @page: result page vector
151 * Grab two locked pages for inode's by inode order
154 mext_page_double_lock(struct inode
*inode1
, struct inode
*inode2
,
155 pgoff_t index1
, pgoff_t index2
, struct page
*page
[2])
157 struct address_space
*mapping
[2];
158 unsigned fl
= AOP_FLAG_NOFS
;
160 BUG_ON(!inode1
|| !inode2
);
161 if (inode1
< inode2
) {
162 mapping
[0] = inode1
->i_mapping
;
163 mapping
[1] = inode2
->i_mapping
;
165 pgoff_t tmp
= index1
;
168 mapping
[0] = inode2
->i_mapping
;
169 mapping
[1] = inode1
->i_mapping
;
172 page
[0] = grab_cache_page_write_begin(mapping
[0], index1
, fl
);
176 page
[1] = grab_cache_page_write_begin(mapping
[1], index2
, fl
);
178 unlock_page(page
[0]);
179 page_cache_release(page
[0]);
183 * grab_cache_page_write_begin() may not wait on page's writeback if
184 * BDI not demand that. But it is reasonable to be very conservative
185 * here and explicitly wait on page's writeback
187 wait_on_page_writeback(page
[0]);
188 wait_on_page_writeback(page
[1]);
189 if (inode1
> inode2
) {
198 /* Force page buffers uptodate w/o dropping page's lock */
200 mext_page_mkuptodate(struct page
*page
, unsigned from
, unsigned to
)
202 struct inode
*inode
= page
->mapping
->host
;
204 struct buffer_head
*bh
, *head
, *arr
[MAX_BUF_PER_PAGE
];
205 unsigned int blocksize
, block_start
, block_end
;
206 int i
, err
, nr
= 0, partial
= 0;
207 BUG_ON(!PageLocked(page
));
208 BUG_ON(PageWriteback(page
));
210 if (PageUptodate(page
))
213 blocksize
= 1 << inode
->i_blkbits
;
214 if (!page_has_buffers(page
))
215 create_empty_buffers(page
, blocksize
, 0);
217 head
= page_buffers(page
);
218 block
= (sector_t
)page
->index
<< (PAGE_CACHE_SHIFT
- inode
->i_blkbits
);
219 for (bh
= head
, block_start
= 0; bh
!= head
|| !block_start
;
220 block
++, block_start
= block_end
, bh
= bh
->b_this_page
) {
221 block_end
= block_start
+ blocksize
;
222 if (block_end
<= from
|| block_start
>= to
) {
223 if (!buffer_uptodate(bh
))
227 if (buffer_uptodate(bh
))
229 if (!buffer_mapped(bh
)) {
230 err
= ext4_get_block(inode
, block
, bh
, 0);
235 if (!buffer_mapped(bh
)) {
236 zero_user(page
, block_start
, blocksize
);
237 set_buffer_uptodate(bh
);
241 BUG_ON(nr
>= MAX_BUF_PER_PAGE
);
248 for (i
= 0; i
< nr
; i
++) {
250 if (!bh_uptodate_or_lock(bh
)) {
251 err
= bh_submit_read(bh
);
258 SetPageUptodate(page
);
263 * move_extent_per_page - Move extent data per page
265 * @o_filp: file structure of original file
266 * @donor_inode: donor inode
267 * @orig_page_offset: page index on original file
268 * @data_offset_in_page: block index where data swapping starts
269 * @block_len_in_page: the number of blocks to be swapped
270 * @unwritten: orig extent is unwritten or not
271 * @err: pointer to save return value
273 * Save the data in original inode blocks and replace original inode extents
274 * with donor inode extents by calling mext_replace_branches().
275 * Finally, write out the saved data in new original inode blocks. Return
276 * replaced block count.
279 move_extent_per_page(struct file
*o_filp
, struct inode
*donor_inode
,
280 pgoff_t orig_page_offset
, pgoff_t donor_page_offset
,
281 int data_offset_in_page
,
282 int block_len_in_page
, int unwritten
, int *err
)
284 struct inode
*orig_inode
= file_inode(o_filp
);
285 struct page
*pagep
[2] = {NULL
, NULL
};
287 ext4_lblk_t orig_blk_offset
, donor_blk_offset
;
288 unsigned long blocksize
= orig_inode
->i_sb
->s_blocksize
;
289 unsigned int w_flags
= 0;
290 unsigned int tmp_data_size
, data_size
, replaced_size
;
291 int err2
, jblocks
, retries
= 0;
292 int replaced_count
= 0;
293 int from
= data_offset_in_page
<< orig_inode
->i_blkbits
;
294 int blocks_per_page
= PAGE_CACHE_SIZE
>> orig_inode
->i_blkbits
;
297 * It needs twice the amount of ordinary journal buffers because
298 * inode and donor_inode may change each different metadata blocks.
302 jblocks
= ext4_writepage_trans_blocks(orig_inode
) * 2;
303 handle
= ext4_journal_start(orig_inode
, EXT4_HT_MOVE_EXTENTS
, jblocks
);
304 if (IS_ERR(handle
)) {
305 *err
= PTR_ERR(handle
);
309 if (segment_eq(get_fs(), KERNEL_DS
))
310 w_flags
|= AOP_FLAG_UNINTERRUPTIBLE
;
312 orig_blk_offset
= orig_page_offset
* blocks_per_page
+
315 donor_blk_offset
= donor_page_offset
* blocks_per_page
+
318 /* Calculate data_size */
319 if ((orig_blk_offset
+ block_len_in_page
- 1) ==
320 ((orig_inode
->i_size
- 1) >> orig_inode
->i_blkbits
)) {
321 /* Replace the last block */
322 tmp_data_size
= orig_inode
->i_size
& (blocksize
- 1);
324 * If data_size equal zero, it shows data_size is multiples of
325 * blocksize. So we set appropriate value.
327 if (tmp_data_size
== 0)
328 tmp_data_size
= blocksize
;
330 data_size
= tmp_data_size
+
331 ((block_len_in_page
- 1) << orig_inode
->i_blkbits
);
333 data_size
= block_len_in_page
<< orig_inode
->i_blkbits
;
335 replaced_size
= data_size
;
337 *err
= mext_page_double_lock(orig_inode
, donor_inode
, orig_page_offset
,
338 donor_page_offset
, pagep
);
339 if (unlikely(*err
< 0))
342 * If orig extent was unwritten it can become initialized
343 * at any time after i_data_sem was dropped, in order to
344 * serialize with delalloc we have recheck extent while we
345 * hold page's lock, if it is still the case data copy is not
346 * necessary, just swap data blocks between orig and donor.
349 ext4_double_down_write_data_sem(orig_inode
, donor_inode
);
350 /* If any of extents in range became initialized we have to
351 * fallback to data copying */
352 unwritten
= mext_check_coverage(orig_inode
, orig_blk_offset
,
353 block_len_in_page
, 1, err
);
357 unwritten
&= mext_check_coverage(donor_inode
, donor_blk_offset
,
358 block_len_in_page
, 1, err
);
363 ext4_double_up_write_data_sem(orig_inode
, donor_inode
);
366 if ((page_has_private(pagep
[0]) &&
367 !try_to_release_page(pagep
[0], 0)) ||
368 (page_has_private(pagep
[1]) &&
369 !try_to_release_page(pagep
[1], 0))) {
373 replaced_count
= ext4_swap_extents(handle
, orig_inode
,
374 donor_inode
, orig_blk_offset
,
376 block_len_in_page
, 1, err
);
378 ext4_double_up_write_data_sem(orig_inode
, donor_inode
);
382 *err
= mext_page_mkuptodate(pagep
[0], from
, from
+ replaced_size
);
386 /* At this point all buffers in range are uptodate, old mapping layout
387 * is no longer required, try to drop it now. */
388 if ((page_has_private(pagep
[0]) && !try_to_release_page(pagep
[0], 0)) ||
389 (page_has_private(pagep
[1]) && !try_to_release_page(pagep
[1], 0))) {
393 ext4_double_down_write_data_sem(orig_inode
, donor_inode
);
394 replaced_count
= ext4_swap_extents(handle
, orig_inode
, donor_inode
,
395 orig_blk_offset
, donor_blk_offset
,
396 block_len_in_page
, 1, err
);
397 ext4_double_up_write_data_sem(orig_inode
, donor_inode
);
399 if (replaced_count
) {
400 block_len_in_page
= replaced_count
;
402 block_len_in_page
<< orig_inode
->i_blkbits
;
406 /* Perform all necessary steps similar write_begin()/write_end()
407 * but keeping in mind that i_size will not change */
408 *err
= __block_write_begin(pagep
[0], from
, replaced_size
,
411 *err
= block_commit_write(pagep
[0], from
, from
+ replaced_size
);
413 if (unlikely(*err
< 0))
414 goto repair_branches
;
416 /* Even in case of data=writeback it is reasonable to pin
417 * inode to transaction, to prevent unexpected data loss */
418 *err
= ext4_jbd2_file_inode(handle
, orig_inode
);
421 unlock_page(pagep
[0]);
422 page_cache_release(pagep
[0]);
423 unlock_page(pagep
[1]);
424 page_cache_release(pagep
[1]);
426 ext4_journal_stop(handle
);
427 /* Buffer was busy because probably is pinned to journal transaction,
428 * force transaction commit may help to free it. */
429 if (*err
== -EBUSY
&& ext4_should_retry_alloc(orig_inode
->i_sb
,
432 return replaced_count
;
436 * This should never ever happen!
437 * Extents are swapped already, but we are not able to copy data.
438 * Try to swap extents to it's original places
440 ext4_double_down_write_data_sem(orig_inode
, donor_inode
);
441 replaced_count
= ext4_swap_extents(handle
, donor_inode
, orig_inode
,
442 orig_blk_offset
, donor_blk_offset
,
443 block_len_in_page
, 0, &err2
);
444 ext4_double_up_write_data_sem(orig_inode
, donor_inode
);
445 if (replaced_count
!= block_len_in_page
) {
446 EXT4_ERROR_INODE_BLOCK(orig_inode
, (sector_t
)(orig_blk_offset
),
447 "Unable to copy data block,"
448 " data will be lost.");
456 * mext_check_arguments - Check whether move extent can be done
458 * @orig_inode: original inode
459 * @donor_inode: donor inode
460 * @orig_start: logical start offset in block for orig
461 * @donor_start: logical start offset in block for donor
462 * @len: the number of blocks to be moved
464 * Check the arguments of ext4_move_extents() whether the files can be
465 * exchanged with each other.
466 * Return 0 on success, or a negative error value on failure.
469 mext_check_arguments(struct inode
*orig_inode
,
470 struct inode
*donor_inode
, __u64 orig_start
,
471 __u64 donor_start
, __u64
*len
)
473 __u64 orig_eof
, donor_eof
;
474 unsigned int blkbits
= orig_inode
->i_blkbits
;
475 unsigned int blocksize
= 1 << blkbits
;
477 orig_eof
= (i_size_read(orig_inode
) + blocksize
- 1) >> blkbits
;
478 donor_eof
= (i_size_read(donor_inode
) + blocksize
- 1) >> blkbits
;
481 if (donor_inode
->i_mode
& (S_ISUID
|S_ISGID
)) {
482 ext4_debug("ext4 move extent: suid or sgid is set"
483 " to donor file [ino:orig %lu, donor %lu]\n",
484 orig_inode
->i_ino
, donor_inode
->i_ino
);
488 if (IS_IMMUTABLE(donor_inode
) || IS_APPEND(donor_inode
))
491 /* Ext4 move extent does not support swapfile */
492 if (IS_SWAPFILE(orig_inode
) || IS_SWAPFILE(donor_inode
)) {
493 ext4_debug("ext4 move extent: The argument files should "
494 "not be swapfile [ino:orig %lu, donor %lu]\n",
495 orig_inode
->i_ino
, donor_inode
->i_ino
);
499 /* Ext4 move extent supports only extent based file */
500 if (!(ext4_test_inode_flag(orig_inode
, EXT4_INODE_EXTENTS
))) {
501 ext4_debug("ext4 move extent: orig file is not extents "
502 "based file [ino:orig %lu]\n", orig_inode
->i_ino
);
504 } else if (!(ext4_test_inode_flag(donor_inode
, EXT4_INODE_EXTENTS
))) {
505 ext4_debug("ext4 move extent: donor file is not extents "
506 "based file [ino:donor %lu]\n", donor_inode
->i_ino
);
510 if ((!orig_inode
->i_size
) || (!donor_inode
->i_size
)) {
511 ext4_debug("ext4 move extent: File size is 0 byte\n");
515 /* Start offset should be same */
516 if ((orig_start
& ~(PAGE_MASK
>> orig_inode
->i_blkbits
)) !=
517 (donor_start
& ~(PAGE_MASK
>> orig_inode
->i_blkbits
))) {
518 ext4_debug("ext4 move extent: orig and donor's start "
519 "offset are not alligned [ino:orig %lu, donor %lu]\n",
520 orig_inode
->i_ino
, donor_inode
->i_ino
);
524 if ((orig_start
>= EXT_MAX_BLOCKS
) ||
525 (donor_start
>= EXT_MAX_BLOCKS
) ||
526 (*len
> EXT_MAX_BLOCKS
) ||
527 (donor_start
+ *len
>= EXT_MAX_BLOCKS
) ||
528 (orig_start
+ *len
>= EXT_MAX_BLOCKS
)) {
529 ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
530 "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS
,
531 orig_inode
->i_ino
, donor_inode
->i_ino
);
534 if (orig_eof
< orig_start
+ *len
- 1)
535 *len
= orig_eof
- orig_start
;
536 if (donor_eof
< donor_start
+ *len
- 1)
537 *len
= donor_eof
- donor_start
;
539 ext4_debug("ext4 move extent: len should not be 0 "
540 "[ino:orig %lu, donor %lu]\n", orig_inode
->i_ino
,
549 * ext4_move_extents - Exchange the specified range of a file
551 * @o_filp: file structure of the original file
552 * @d_filp: file structure of the donor file
553 * @orig_start: start offset in block for orig
554 * @donor_start: start offset in block for donor
555 * @len: the number of blocks to be moved
556 * @moved_len: moved block length
558 * This function returns 0 and moved block length is set in moved_len
559 * if succeed, otherwise returns error value.
561 * Note: ext4_move_extents() proceeds the following order.
562 * 1:ext4_move_extents() calculates the last block number of moving extent
563 * function by the start block number (orig_start) and the number of blocks
564 * to be moved (len) specified as arguments.
565 * If the {orig, donor}_start points a hole, the extent's start offset
566 * pointed by ext_cur (current extent), holecheck_path, orig_path are set
568 * 2:Continue step 3 to step 5, until the holecheck_path points to last_extent
569 * or the ext_cur exceeds the block_end which is last logical block number.
570 * 3:To get the length of continues area, call mext_next_extent()
571 * specified with the ext_cur (initial value is holecheck_path) re-cursive,
572 * until find un-continuous extent, the start logical block number exceeds
573 * the block_end or the extent points to the last extent.
574 * 4:Exchange the original inode data with donor inode data
575 * from orig_page_offset to seq_end_page.
576 * The start indexes of data are specified as arguments.
577 * That of the original inode is orig_page_offset,
578 * and the donor inode is also orig_page_offset
579 * (To easily handle blocksize != pagesize case, the offset for the
580 * donor inode is block unit).
581 * 5:Update holecheck_path and orig_path to points a next proceeding extent,
582 * then returns to step 2.
583 * 6:Release holecheck_path, orig_path and set the len to moved_len
584 * which shows the number of moved blocks.
585 * The moved_len is useful for the command to calculate the file offset
586 * for starting next move extent ioctl.
587 * 7:Return 0 on success, or a negative error value on failure.
590 ext4_move_extents(struct file
*o_filp
, struct file
*d_filp
, __u64 orig_blk
,
591 __u64 donor_blk
, __u64 len
, __u64
*moved_len
)
593 struct inode
*orig_inode
= file_inode(o_filp
);
594 struct inode
*donor_inode
= file_inode(d_filp
);
595 struct ext4_ext_path
*path
= NULL
;
596 int blocks_per_page
= PAGE_CACHE_SIZE
>> orig_inode
->i_blkbits
;
597 ext4_lblk_t o_end
, o_start
= orig_blk
;
598 ext4_lblk_t d_start
= donor_blk
;
601 if (orig_inode
->i_sb
!= donor_inode
->i_sb
) {
602 ext4_debug("ext4 move extent: The argument files "
603 "should be in same FS [ino:orig %lu, donor %lu]\n",
604 orig_inode
->i_ino
, donor_inode
->i_ino
);
608 /* orig and donor should be different inodes */
609 if (orig_inode
== donor_inode
) {
610 ext4_debug("ext4 move extent: The argument files should not "
611 "be same inode [ino:orig %lu, donor %lu]\n",
612 orig_inode
->i_ino
, donor_inode
->i_ino
);
616 /* Regular file check */
617 if (!S_ISREG(orig_inode
->i_mode
) || !S_ISREG(donor_inode
->i_mode
)) {
618 ext4_debug("ext4 move extent: The argument files should be "
619 "regular file [ino:orig %lu, donor %lu]\n",
620 orig_inode
->i_ino
, donor_inode
->i_ino
);
623 /* TODO: This is non obvious task to swap blocks for inodes with full
625 if (ext4_should_journal_data(orig_inode
) ||
626 ext4_should_journal_data(donor_inode
)) {
629 /* Protect orig and donor inodes against a truncate */
630 lock_two_nondirectories(orig_inode
, donor_inode
);
632 /* Wait for all existing dio workers */
633 ext4_inode_block_unlocked_dio(orig_inode
);
634 ext4_inode_block_unlocked_dio(donor_inode
);
635 inode_dio_wait(orig_inode
);
636 inode_dio_wait(donor_inode
);
638 /* Protect extent tree against block allocations via delalloc */
639 ext4_double_down_write_data_sem(orig_inode
, donor_inode
);
640 /* Check the filesystem environment whether move_extent can be done */
641 ret
= mext_check_arguments(orig_inode
, donor_inode
, orig_blk
,
645 o_end
= o_start
+ len
;
647 while (o_start
< o_end
) {
648 struct ext4_extent
*ex
;
649 ext4_lblk_t cur_blk
, next_blk
;
650 pgoff_t orig_page_index
, donor_page_index
;
652 int unwritten
, cur_len
;
654 ret
= get_ext_path(orig_inode
, o_start
, &path
);
657 ex
= path
[path
->p_depth
].p_ext
;
658 next_blk
= ext4_ext_next_allocated_block(path
);
659 cur_blk
= le32_to_cpu(ex
->ee_block
);
660 cur_len
= ext4_ext_get_actual_len(ex
);
661 /* Check hole before the start pos */
662 if (cur_blk
+ cur_len
- 1 < o_start
) {
663 if (next_blk
== EXT_MAX_BLOCKS
) {
668 d_start
+= next_blk
- o_start
;
671 /* Check hole after the start pos */
672 } else if (cur_blk
> o_start
) {
674 d_start
+= cur_blk
- o_start
;
676 /* Extent inside requested range ?*/
677 if (cur_blk
>= o_end
)
679 } else { /* in_range(o_start, o_blk, o_len) */
680 cur_len
+= cur_blk
- o_start
;
682 unwritten
= ext4_ext_is_unwritten(ex
);
683 if (o_end
- o_start
< cur_len
)
684 cur_len
= o_end
- o_start
;
686 orig_page_index
= o_start
>> (PAGE_CACHE_SHIFT
-
687 orig_inode
->i_blkbits
);
688 donor_page_index
= d_start
>> (PAGE_CACHE_SHIFT
-
689 donor_inode
->i_blkbits
);
690 offset_in_page
= o_start
% blocks_per_page
;
691 if (cur_len
> blocks_per_page
- offset_in_page
)
692 cur_len
= blocks_per_page
- offset_in_page
;
694 * Up semaphore to avoid following problems:
695 * a. transaction deadlock among ext4_journal_start,
696 * ->write_begin via pagefault, and jbd2_journal_commit
697 * b. racing with ->readpage, ->write_begin, and ext4_get_block
698 * in move_extent_per_page
700 ext4_double_up_write_data_sem(orig_inode
, donor_inode
);
701 /* Swap original branches with new branches */
702 move_extent_per_page(o_filp
, donor_inode
,
703 orig_page_index
, donor_page_index
,
704 offset_in_page
, cur_len
,
706 ext4_double_down_write_data_sem(orig_inode
, donor_inode
);
712 ext4_ext_drop_refs(path
);
716 *moved_len
= o_start
- orig_blk
;
717 if (*moved_len
> len
)
722 ext4_discard_preallocations(orig_inode
);
723 ext4_discard_preallocations(donor_inode
);
726 ext4_ext_drop_refs(path
);
728 ext4_double_up_write_data_sem(orig_inode
, donor_inode
);
729 ext4_inode_resume_unlocked_dio(orig_inode
);
730 ext4_inode_resume_unlocked_dio(donor_inode
);
731 unlock_two_nondirectories(orig_inode
, donor_inode
);