ext4: switch the guts of ->direct_IO() to iov_iter
fs/ext4/indirect.c
1/*
2 * linux/fs/ext4/indirect.c
3 *
4 * from
5 *
6 * linux/fs/ext4/inode.c
7 *
8 * Copyright (C) 1992, 1993, 1994, 1995
9 * Remy Card (card@masi.ibp.fr)
10 * Laboratoire MASI - Institut Blaise Pascal
11 * Universite Pierre et Marie Curie (Paris VI)
12 *
13 * from
14 *
15 * linux/fs/minix/inode.c
16 *
17 * Copyright (C) 1991, 1992 Linus Torvalds
18 *
19 * Goal-directed block allocation by Stephen Tweedie
20 * (sct@redhat.com), 1993, 1998
21 */
22
23#include <linux/aio.h>
24#include "ext4_jbd2.h"
25#include "truncate.h"
26
27#include <trace/events/ext4.h>
28
29typedef struct {
30 __le32 *p;
31 __le32 key;
32 struct buffer_head *bh;
33} Indirect;
34
35static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
36{
37 p->key = *(p->p = v);
38 p->bh = bh;
39}
40
41/**
42 * ext4_block_to_path - parse the block number into array of offsets
43 * @inode: inode in question (we are only interested in its superblock)
44 * @i_block: block number to be parsed
45 * @offsets: array to store the offsets in
46 * @boundary: set this non-zero if the referred-to block is likely to be
47 * followed (on disk) by an indirect block.
48 *
49 * To store the locations of a file's data, ext4 uses a data structure common
50 * for UNIX filesystems - tree of pointers anchored in the inode, with
51 * data blocks at leaves and indirect blocks in intermediate nodes.
52 * This function translates the block number into path in that tree -
53 * return value is the path length and @offsets[n] is the offset of
54 * pointer to (n+1)th node in the nth one. If @block is out of range
55 * (negative or too large) a warning is printed and zero is returned.
56 *
57 * Note: function doesn't find node addresses, so no IO is needed. All
58 * we need to know is the capacity of indirect blocks (taken from the
59 * inode->i_sb).
60 */
61
62/*
63 * Portability note: the last comparison (check that we fit into triple
64 * indirect block) is spelled differently, because otherwise on an
65 * architecture with 32-bit longs and 8Kb pages we might get into trouble
66 * if our filesystem had 8Kb blocks. We might use long long, but that would
67 * kill us on x86. Oh, well, at least the sign propagation does not matter -
68 * i_block would have to be negative in the very beginning, so we would not
69 * get there at all.
70 */
71
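/*
 * A worked example (illustrative only, assuming a 4KiB block size, so 1024
 * pointers per indirect block and ptrs_bits == 10): logical block 5 is a
 * direct block, giving the path {5} with depth 1; logical block 5000 lies
 * past the 12 direct and 1024 single-indirect slots (5000 - 12 - 1024 =
 * 3964 < 1024^2), so it sits under the double-indirect block and the path
 * is {EXT4_DIND_BLOCK, 3964 >> 10, 3964 & 1023} = {13, 3, 892}, depth 3.
 */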
72static int ext4_block_to_path(struct inode *inode,
73 ext4_lblk_t i_block,
74 ext4_lblk_t offsets[4], int *boundary)
75{
76 int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
77 int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
78 const long direct_blocks = EXT4_NDIR_BLOCKS,
79 indirect_blocks = ptrs,
80 double_blocks = (1 << (ptrs_bits * 2));
81 int n = 0;
82 int final = 0;
83
84 if (i_block < direct_blocks) {
85 offsets[n++] = i_block;
86 final = direct_blocks;
87 } else if ((i_block -= direct_blocks) < indirect_blocks) {
88 offsets[n++] = EXT4_IND_BLOCK;
89 offsets[n++] = i_block;
90 final = ptrs;
91 } else if ((i_block -= indirect_blocks) < double_blocks) {
92 offsets[n++] = EXT4_DIND_BLOCK;
93 offsets[n++] = i_block >> ptrs_bits;
94 offsets[n++] = i_block & (ptrs - 1);
95 final = ptrs;
96 } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
97 offsets[n++] = EXT4_TIND_BLOCK;
98 offsets[n++] = i_block >> (ptrs_bits * 2);
99 offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
100 offsets[n++] = i_block & (ptrs - 1);
101 final = ptrs;
102 } else {
103 ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
104 i_block + direct_blocks +
105 indirect_blocks + double_blocks, inode->i_ino);
106 }
107 if (boundary)
108 *boundary = final - 1 - (i_block & (ptrs - 1));
109 return n;
110}
111
112/**
113 * ext4_get_branch - read the chain of indirect blocks leading to data
114 * @inode: inode in question
115 * @depth: depth of the chain (1 - direct pointer, etc.)
116 * @offsets: offsets of pointers in inode/indirect blocks
117 * @chain: place to store the result
118 * @err: here we store the error value
119 *
120 * Function fills the array of triples <key, p, bh> and returns %NULL
121 * if everything went OK or the pointer to the last filled triple
122 * (incomplete one) otherwise. Upon the return chain[i].key contains
123 * the number of (i+1)-th block in the chain (as it is stored in memory,
124 * i.e. little-endian 32-bit), chain[i].p contains the address of that
125 * number (it points into struct inode for i==0 and into the bh->b_data
126 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
127 * block for i>0 and NULL for i==0. In other words, it holds the block
128 * numbers of the chain, addresses they were taken from (and where we can
129 * verify that chain did not change) and buffer_heads hosting these
130 * numbers.
131 *
132 * Function stops when it stumbles upon zero pointer (absent block)
133 * (pointer to last triple returned, *@err == 0)
134 * or when it gets an IO error reading an indirect block
135 * (ditto, *@err == -EIO)
136 * or when it reads all @depth-1 indirect blocks successfully and finds
137 * the whole chain, all the way to the data (returns %NULL, *err == 0).
138 *
139 * Need to be called with
140 * down_read(&EXT4_I(inode)->i_data_sem)
141 */
142static Indirect *ext4_get_branch(struct inode *inode, int depth,
143 ext4_lblk_t *offsets,
144 Indirect chain[4], int *err)
145{
146 struct super_block *sb = inode->i_sb;
147 Indirect *p = chain;
148 struct buffer_head *bh;
149 int ret = -EIO;
150
151 *err = 0;
152 /* i_data is not going away, no lock needed */
153 add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
154 if (!p->key)
155 goto no_block;
156 while (--depth) {
157 bh = sb_getblk(sb, le32_to_cpu(p->key));
158 if (unlikely(!bh)) {
159 ret = -ENOMEM;
160 goto failure;
161 }
162
163 if (!bh_uptodate_or_lock(bh)) {
164 if (bh_submit_read(bh) < 0) {
165 put_bh(bh);
166 goto failure;
167 }
168 /* validate block references */
169 if (ext4_check_indirect_blockref(inode, bh)) {
170 put_bh(bh);
171 goto failure;
172 }
173 }
174
175 add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
176 /* Reader: end */
177 if (!p->key)
178 goto no_block;
179 }
180 return NULL;
181
182failure:
183 *err = ret;
184no_block:
185 return p;
186}
187
188/**
189 * ext4_find_near - find a place for allocation with sufficient locality
190 * @inode: owner
191 * @ind: descriptor of indirect block.
192 *
193 * This function returns the preferred place for block allocation.
194 * It is used when heuristic for sequential allocation fails.
195 * Rules are:
196 * + if there is a block to the left of our position - allocate near it.
197 * + if pointer will live in indirect block - allocate near that block.
198 * + if pointer will live in inode - allocate in the same
199 * cylinder group.
200 *
201 * In the latter case we colour the starting block by the caller's PID to
202 * prevent it from clashing with concurrent allocations for a different inode
203 * in the same block group. The PID is used here so that functionally related
204 * files will be close-by on-disk.
205 *
206 * Caller must make sure that @ind is valid and will stay that way.
207 */
208static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
209{
210 struct ext4_inode_info *ei = EXT4_I(inode);
211 __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
212 __le32 *p;
213
214 /* Try to find previous block */
215 for (p = ind->p - 1; p >= start; p--) {
216 if (*p)
217 return le32_to_cpu(*p);
218 }
219
220 /* No such thing, so let's try location of indirect block */
221 if (ind->bh)
222 return ind->bh->b_blocknr;
223
224 /*
225 * It is going to be referred to from the inode itself? OK, just put it
226 * into the same cylinder group then.
227 */
228 return ext4_inode_to_goal_block(inode);
229}
230
231/**
232 * ext4_find_goal - find a preferred place for allocation.
233 * @inode: owner
234 * @block: block we want
235 * @partial: pointer to the last triple within a chain
236 *
237 * Normally this function finds the preferred place for block allocation
238 * and returns it.
239 * Because this is only used for non-extent files, we limit the block nr
240 * to 32 bits.
241 */
242static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
243 Indirect *partial)
244{
245 ext4_fsblk_t goal;
246
247 /*
248 * XXX need to get goal block from mballoc's data structures
249 */
250
251 goal = ext4_find_near(inode, partial);
252 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
253 return goal;
254}
255
256/**
257 * ext4_blks_to_allocate - Look up the block map and count the number
258 * of direct blocks that need to be allocated for the given branch.
259 *
260 * @branch: chain of indirect blocks
261 * @k: number of blocks needed for indirect blocks
262 * @blks: number of data blocks to be mapped.
263 * @blocks_to_boundary: the offset in the indirect block
264 *
265 * return the total number of blocks to be allocated, including the
266 * direct and indirect blocks.
267 */
268static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
269 int blocks_to_boundary)
270{
271 unsigned int count = 0;
272
273 /*
274 * Simple case, [t,d]indirect block(s) have not been allocated yet;
275 * then it's clear the blocks on that path have not been allocated
276 */
277 if (k > 0) {
278 /* right now we don't handle cross boundary allocation */
279 if (blks < blocks_to_boundary + 1)
280 count += blks;
281 else
282 count += blocks_to_boundary + 1;
283 return count;
284 }
285
286 count++;
287 while (count < blks && count <= blocks_to_boundary &&
288 le32_to_cpu(*(branch[0].p + count)) == 0) {
289 count++;
290 }
291 return count;
292}
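/*
 * Illustrative examples for the above: if k > 0 some indirect blocks in the
 * branch are missing, so with blks == 8 and blocks_to_boundary == 2 the
 * function returns 3 and never crosses the indirect block boundary. If
 * k == 0 the leaf indirect block already exists and the loop counts the run
 * of empty slots starting at the target, e.g. it returns 3 when the next two
 * pointers after the target slot are zero and the third one is already
 * mapped (still capped by blks and the boundary).
 */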
293
294/**
295 * ext4_alloc_branch - allocate and set up a chain of blocks.
296 * @handle: handle for this transaction
297 * @inode: owner
298 * @indirect_blks: number of allocated indirect blocks
299 * @blks: number of allocated direct blocks
300 * @goal: preferred place for allocation
301 * @offsets: offsets (in the blocks) to store the pointers to next.
302 * @branch: place to store the chain in.
303 *
304 * This function allocates blocks, zeroes out all but the last one,
305 * links them into chain and (if we are synchronous) writes them to disk.
306 * In other words, it prepares a branch that can be spliced onto the
307 * inode. It stores the information about that chain in the branch[], in
308 * the same format as ext4_get_branch() would do. We are calling it after
309 * we had read the existing part of chain and partial points to the last
310 * triple of that (one with zero ->key). Upon the exit we have the same
311 * picture as after the successful ext4_get_block(), except that in one
312 * place chain is disconnected - *branch->p is still zero (we did not
313 * set the last link), but branch->key contains the number that should
314 * be placed into *branch->p to fill that gap.
315 *
316 * If allocation fails we free all blocks we've allocated (and forget
317 * their buffer_heads) and return the error value from the failed
318 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
319 * as described above and return 0.
320 */
321static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
322 ext4_lblk_t iblock, int indirect_blks,
323 int *blks, ext4_fsblk_t goal,
324 ext4_lblk_t *offsets, Indirect *branch)
325{
326 struct ext4_allocation_request ar;
327 struct buffer_head * bh;
328 ext4_fsblk_t b, new_blocks[4];
329 __le32 *p;
330 int i, j, err, len = 1;
331
332 /*
333 * Set up for the direct block allocation
334 */
335 memset(&ar, 0, sizeof(ar));
336 ar.inode = inode;
337 ar.len = *blks;
338 ar.logical = iblock;
339 if (S_ISREG(inode->i_mode))
340 ar.flags = EXT4_MB_HINT_DATA;
341
342 for (i = 0; i <= indirect_blks; i++) {
343 if (i == indirect_blks) {
344 ar.goal = goal;
345 new_blocks[i] = ext4_mb_new_blocks(handle, &ar, &err);
346 } else
347 goal = new_blocks[i] = ext4_new_meta_blocks(handle, inode,
348 goal, 0, NULL, &err);
349 if (err) {
350 i--;
351 goto failed;
352 }
353 branch[i].key = cpu_to_le32(new_blocks[i]);
354 if (i == 0)
355 continue;
356
357 bh = branch[i].bh = sb_getblk(inode->i_sb, new_blocks[i-1]);
358 if (unlikely(!bh)) {
359 err = -ENOMEM;
360 goto failed;
361 }
362 lock_buffer(bh);
363 BUFFER_TRACE(bh, "call get_create_access");
364 err = ext4_journal_get_create_access(handle, bh);
365 if (err) {
366 unlock_buffer(bh);
367 goto failed;
368 }
369
370 memset(bh->b_data, 0, bh->b_size);
371 p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
372 b = new_blocks[i];
373
374 if (i == indirect_blks)
375 len = ar.len;
376 for (j = 0; j < len; j++)
377 *p++ = cpu_to_le32(b++);
378
379 BUFFER_TRACE(bh, "marking uptodate");
380 set_buffer_uptodate(bh);
381 unlock_buffer(bh);
382
383 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
384 err = ext4_handle_dirty_metadata(handle, inode, bh);
385 if (err)
386 goto failed;
387 }
388 *blks = ar.len;
389 return 0;
390failed:
391 for (; i >= 0; i--) {
392 if (i != indirect_blks && branch[i].bh)
393 ext4_forget(handle, 1, inode, branch[i].bh,
394 branch[i].bh->b_blocknr);
395 ext4_free_blocks(handle, inode, NULL, new_blocks[i],
396 (i == indirect_blks) ? ar.len : 1, 0);
397 }
398 return err;
399}
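/*
 * A sketch of the result (hypothetical numbers): for a write into a hole
 * where both the double-indirect and the single-indirect block are missing
 * (indirect_blks == 2), new_blocks[0] and new_blocks[1] receive the two
 * freshly allocated metadata blocks and new_blocks[2] the first of ar.len
 * data blocks. branch[1].bh is the buffer for new_blocks[0] with the slot
 * at offsets[1] pointing to new_blocks[1]; branch[2].bh is the buffer for
 * new_blocks[1] with ar.len consecutive data block numbers written starting
 * at offsets[2]. Only *branch->p in the existing parent stays unset, as
 * described above.
 */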
400
401/**
402 * ext4_splice_branch - splice the allocated branch onto inode.
403 * @handle: handle for this transaction
404 * @inode: owner
405 * @block: (logical) number of block we are adding
406 * @chain: chain of indirect blocks (with a missing link - see
407 * ext4_alloc_branch)
408 * @where: location of missing link
409 * @num: number of indirect blocks we are adding
410 * @blks: number of direct blocks we are adding
411 *
412 * This function fills the missing link and does all housekeeping needed in
413 * inode (->i_blocks, etc.). In case of success we end up with the full
414 * chain to new block and return 0.
415 */
416static int ext4_splice_branch(handle_t *handle, struct inode *inode,
417 ext4_lblk_t block, Indirect *where, int num,
418 int blks)
419{
420 int i;
421 int err = 0;
422 ext4_fsblk_t current_block;
423
424 /*
425 * If we're splicing into a [td]indirect block (as opposed to the
426 * inode) then we need to get write access to the [td]indirect block
427 * before the splice.
428 */
429 if (where->bh) {
430 BUFFER_TRACE(where->bh, "get_write_access");
431 err = ext4_journal_get_write_access(handle, where->bh);
432 if (err)
433 goto err_out;
434 }
435 /* That's it */
436
437 *where->p = where->key;
438
439 /*
440 * Update the host buffer_head or inode to point to the just allocated
441 * direct blocks
442 */
443 if (num == 0 && blks > 1) {
444 current_block = le32_to_cpu(where->key) + 1;
445 for (i = 1; i < blks; i++)
446 *(where->p + i) = cpu_to_le32(current_block++);
447 }
448
449 /* We are done with atomic stuff, now do the rest of housekeeping */
450 /* had we spliced it onto indirect block? */
451 if (where->bh) {
452 /*
453 * If we spliced it onto an indirect block, we haven't
454 * altered the inode. Note however that if it is being spliced
455 * onto an indirect block at the very end of the file (the
456 * file is growing) then we *will* alter the inode to reflect
457 * the new i_size. But that is not done here - it is done in
458 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
459 */
460 jbd_debug(5, "splicing indirect only\n");
461 BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
462 err = ext4_handle_dirty_metadata(handle, inode, where->bh);
463 if (err)
464 goto err_out;
465 } else {
466 /*
467 * OK, we spliced it into the inode itself on a direct block.
468 */
469 ext4_mark_inode_dirty(handle, inode);
470 jbd_debug(5, "splicing direct\n");
471 }
472 return err;
473
474err_out:
475 for (i = 1; i <= num; i++) {
476 /*
477 * branch[i].bh is newly allocated, so there is no
478 * need to revoke the block, which is why we don't
479 * need to set EXT4_FREE_BLOCKS_METADATA.
480 */
481 ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
482 EXT4_FREE_BLOCKS_FORGET);
483 }
484 ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
485 blks, 0);
486
487 return err;
488}
489
490/*
491 * The ext4_ind_map_blocks() function handles non-extents inodes
492 * (i.e., using the traditional indirect/double-indirect i_blocks
493 * scheme) for ext4_map_blocks().
494 *
495 * Allocation strategy is simple: if we have to allocate something, we will
496 * have to go the whole way to leaf. So let's do it before attaching anything
497 * to tree, set linkage between the newborn blocks, write them if sync is
498 * required, recheck the path, free and repeat if check fails, otherwise
499 * set the last missing link (that will protect us from any truncate-generated
500 * removals - all blocks on the path are immune now) and possibly force the
501 * write on the parent block.
502 * That has a nice additional property: no special recovery from the failed
503 * allocations is needed - we simply release blocks and do not touch anything
504 * reachable from inode.
505 *
506 * `handle' can be NULL if create == 0.
507 *
508 * return > 0, # of blocks mapped or allocated.
509 * return = 0, if plain lookup failed.
510 * return < 0, error case.
511 *
512 * The ext4_ind_map_blocks() function should be called with
513 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
514 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
515 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
516 * blocks.
517 */
518int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
519 struct ext4_map_blocks *map,
520 int flags)
521{
522 int err = -EIO;
523 ext4_lblk_t offsets[4];
524 Indirect chain[4];
525 Indirect *partial;
526 ext4_fsblk_t goal;
527 int indirect_blks;
528 int blocks_to_boundary = 0;
529 int depth;
530 int count = 0;
531 ext4_fsblk_t first_block = 0;
532
533 trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
534 J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
535 J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
536 depth = ext4_block_to_path(inode, map->m_lblk, offsets,
537 &blocks_to_boundary);
538
539 if (depth == 0)
540 goto out;
541
542 partial = ext4_get_branch(inode, depth, offsets, chain, &err);
543
544 /* Simplest case - block found, no allocation needed */
545 if (!partial) {
546 first_block = le32_to_cpu(chain[depth - 1].key);
547 count++;
548 /*map more blocks*/
549 while (count < map->m_len && count <= blocks_to_boundary) {
550 ext4_fsblk_t blk;
551
552 blk = le32_to_cpu(*(chain[depth-1].p + count));
553
554 if (blk == first_block + count)
555 count++;
556 else
557 break;
558 }
559 goto got_it;
560 }
561
562 /* Next simple case - plain lookup or failed read of indirect block */
563 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
564 goto cleanup;
565
566 /*
567 * Okay, we need to do block allocation.
568 */
569 if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
570 EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
571 EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
572 "non-extent mapped inodes with bigalloc");
573 return -ENOSPC;
574 }
575
576 goal = ext4_find_goal(inode, map->m_lblk, partial);
577
578 /* the number of blocks we need to allocate for [d,t]indirect blocks */
579 indirect_blks = (chain + depth) - partial - 1;
580
581 /*
582 * Next look up the indirect map to count the total number of
583 * direct blocks to allocate for this branch.
584 */
585 count = ext4_blks_to_allocate(partial, indirect_blks,
586 map->m_len, blocks_to_boundary);
587 /*
588 * Block out ext4_truncate while we alter the tree
589 */
590 err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
591 &count, goal,
592 offsets + (partial - chain), partial);
593
594 /*
595 * The ext4_splice_branch call will free and forget any buffers
596 * on the new chain if there is a failure, but that risks using
597 * up transaction credits, especially for bitmaps where the
598 * credits cannot be returned. Can we handle this somehow? We
599 * may need to return -EAGAIN upwards in the worst case. --sct
600 */
601 if (!err)
602 err = ext4_splice_branch(handle, inode, map->m_lblk,
603 partial, indirect_blks, count);
604 if (err)
605 goto cleanup;
606
607 map->m_flags |= EXT4_MAP_NEW;
608
609 ext4_update_inode_fsync_trans(handle, inode, 1);
610got_it:
611 map->m_flags |= EXT4_MAP_MAPPED;
612 map->m_pblk = le32_to_cpu(chain[depth-1].key);
613 map->m_len = count;
614 if (count > blocks_to_boundary)
615 map->m_flags |= EXT4_MAP_BOUNDARY;
616 err = count;
617 /* Clean up and exit */
618 partial = chain + depth - 1; /* the whole chain */
619cleanup:
620 while (partial > chain) {
621 BUFFER_TRACE(partial->bh, "call brelse");
622 brelse(partial->bh);
623 partial--;
624 }
625out:
626 trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
627 return err;
628}
629
630/*
631 * O_DIRECT for ext3 (or indirect map) based files
632 *
633 * If the O_DIRECT write will extend the file then add this inode to the
634 * orphan list. So recovery will truncate it back to the original size
635 * if the machine crashes during the write.
636 *
637 * If the O_DIRECT write is instantiating holes inside i_size and the machine
638 * crashes then stale disk data _may_ be exposed inside the file. But current
639 * VFS code falls back to the buffered path in that case, so we are safe.
640 */
641ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
642 struct iov_iter *iter, loff_t offset)
643{
644 struct file *file = iocb->ki_filp;
645 struct inode *inode = file->f_mapping->host;
646 struct ext4_inode_info *ei = EXT4_I(inode);
647 handle_t *handle;
648 ssize_t ret;
649 int orphan = 0;
650 size_t count = iov_length(iter->iov, iter->nr_segs);
651 int retries = 0;
652
653 if (rw == WRITE) {
654 loff_t final_size = offset + count;
655
656 if (final_size > inode->i_size) {
657 /* Credits for sb + inode write */
658 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
659 if (IS_ERR(handle)) {
660 ret = PTR_ERR(handle);
661 goto out;
662 }
663 ret = ext4_orphan_add(handle, inode);
664 if (ret) {
665 ext4_journal_stop(handle);
666 goto out;
667 }
668 orphan = 1;
669 ei->i_disksize = inode->i_size;
670 ext4_journal_stop(handle);
671 }
672 }
673
674retry:
675 if (rw == READ && ext4_should_dioread_nolock(inode)) {
676 /*
677 * Nolock dioread optimization may be dynamically disabled
678 * via ext4_inode_block_unlocked_dio(). Check inode's state
679 * while holding extra i_dio_count ref.
680 */
681 atomic_inc(&inode->i_dio_count);
682 smp_mb();
683 if (unlikely(ext4_test_inode_state(inode,
684 EXT4_STATE_DIOREAD_LOCK))) {
685 inode_dio_done(inode);
686 goto locked;
687 }
688 ret = __blockdev_direct_IO(rw, iocb, inode,
689 inode->i_sb->s_bdev, iter->iov,
690 offset, iter->nr_segs,
691 ext4_get_block, NULL, NULL, 0);
692 inode_dio_done(inode);
693 } else {
694locked:
695 ret = blockdev_direct_IO(rw, iocb, inode, iter->iov,
696 offset, iter->nr_segs, ext4_get_block);
697
698 if (unlikely((rw & WRITE) && ret < 0)) {
699 loff_t isize = i_size_read(inode);
700 loff_t end = offset + count;
701
702 if (end > isize)
703 ext4_truncate_failed_write(inode);
704 }
705 }
706 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
707 goto retry;
708
709 if (orphan) {
710 int err;
711
712 /* Credits for sb + inode write */
713 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
714 if (IS_ERR(handle)) {
715 /* This is really bad luck. We've written the data
716 * but cannot extend i_size. Bail out and pretend
717 * the write failed... */
718 ret = PTR_ERR(handle);
719 if (inode->i_nlink)
720 ext4_orphan_del(NULL, inode);
721
722 goto out;
723 }
724 if (inode->i_nlink)
725 ext4_orphan_del(handle, inode);
726 if (ret > 0) {
727 loff_t end = offset + ret;
728 if (end > inode->i_size) {
729 ei->i_disksize = end;
730 i_size_write(inode, end);
731 /*
732 * We're going to return a positive `ret'
733 * here due to non-zero-length I/O, so there's
734 * no way of reporting error returns from
735 * ext4_mark_inode_dirty() to userspace. So
736 * ignore it.
737 */
738 ext4_mark_inode_dirty(handle, inode);
739 }
740 }
741 err = ext4_journal_stop(handle);
742 if (ret == 0)
743 ret = err;
744 }
745out:
746 return ret;
747}
748
749/*
750 * Calculate the number of metadata blocks we need to reserve
751 * to allocate a new block at @lblock for a non-extent-based file
752 */
753int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
754{
755 struct ext4_inode_info *ei = EXT4_I(inode);
756 sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
757 int blk_bits;
758
759 if (lblock < EXT4_NDIR_BLOCKS)
760 return 0;
761
762 lblock -= EXT4_NDIR_BLOCKS;
763
764 if (ei->i_da_metadata_calc_len &&
765 (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
766 ei->i_da_metadata_calc_len++;
767 return 0;
768 }
769 ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
770 ei->i_da_metadata_calc_len = 1;
771 blk_bits = order_base_2(lblock);
772 return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
773}
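/*
 * For example (assuming 4KiB blocks, i.e. 1024 pointers per block): lblock 5
 * is a direct block, so we return 0; lblock 100 maps through a single
 * indirect block, so we return 1; a following call for lblock 101 hits the
 * cached window, merely bumps i_da_metadata_calc_len and returns 0.
 */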
774
775/*
776 * Calculate number of indirect blocks touched by mapping @nrblocks logically
777 * contiguous blocks
778 */
779int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
780{
781 /*
782 * With N contiguous data blocks, we need at most
783 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
784 * 2 dindirect blocks, and 1 tindirect block
785 */
786 return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
787}
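/*
 * For example, with 4KiB blocks (1024 pointers per block) mapping 2500
 * contiguous blocks needs at most DIV_ROUND_UP(2500, 1024) = 3 leaf indirect
 * blocks plus the worst-case 1 extra indirect, 2 dindirect and 1 tindirect
 * block, so the function returns 3 + 4 = 7.
 */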
788
789/*
790 * Truncate transactions can be complex and absolutely huge. So we need to
791 * be able to restart the transaction at a convenient checkpoint to make
792 * sure we don't overflow the journal.
793 *
794 * Try to extend this transaction for the purposes of truncation. If
795 * extend fails, we need to propagate the failure up and restart the
796 * transaction in the top-level truncate loop. --sct
797 *
798 * Returns 0 if we managed to create more room. If we can't create more
799 * room and the transaction must be restarted, we return 1.
800 */
801static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
802{
803 if (!ext4_handle_valid(handle))
804 return 0;
805 if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
806 return 0;
807 if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode)))
808 return 0;
809 return 1;
810}
811
812/*
813 * Probably it should be a library function... search for first non-zero word
814 * or memcmp with zero_page, whatever is better for particular architecture.
815 * Linus?
816 */
817static inline int all_zeroes(__le32 *p, __le32 *q)
818{
819 while (p < q)
820 if (*p++)
821 return 0;
822 return 1;
823}
824
825/**
826 * ext4_find_shared - find the indirect blocks for partial truncation.
827 * @inode: inode in question
828 * @depth: depth of the affected branch
829 * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
830 * @chain: place to store the pointers to partial indirect blocks
831 * @top: place to the (detached) top of branch
832 *
833 * This is a helper function used by ext4_truncate().
834 *
835 * When we do truncate() we may have to clean the ends of several
836 * indirect blocks but leave the blocks themselves alive. Block is
837 * partially truncated if some data below the new i_size is referred
838 * from it (and it is on the path to the first completely truncated
839 * data block, indeed). We have to free the top of that path along
840 * with everything to the right of the path. Since no allocation
841 * past the truncation point is possible until ext4_truncate()
842 * finishes, we may safely do the latter, but top of branch may
843 * require special attention - pageout below the truncation point
844 * might try to populate it.
845 *
846 * We atomically detach the top of branch from the tree, store the
847 * block number of its root in *@top, pointers to buffer_heads of
848 * partially truncated blocks - in @chain[].bh and pointers to
849 * their last elements that should not be removed - in
850 * @chain[].p. Return value is the pointer to last filled element
851 * of @chain.
852 *
853 * The work left to the caller is to do the actual freeing of subtrees:
854 * a) free the subtree starting from *@top
855 * b) free the subtrees whose roots are stored in
856 * (@chain[i].p+1 .. end of @chain[i].bh->b_data)
857 * c) free the subtrees growing from the inode past the @chain[0].
858 * (no partially truncated stuff there). */
859
860static Indirect *ext4_find_shared(struct inode *inode, int depth,
861 ext4_lblk_t offsets[4], Indirect chain[4],
862 __le32 *top)
863{
864 Indirect *partial, *p;
865 int k, err;
866
867 *top = 0;
868 /* Make k index the deepest non-null offset + 1 */
869 for (k = depth; k > 1 && !offsets[k-1]; k--)
870 ;
871 partial = ext4_get_branch(inode, k, offsets, chain, &err);
872 /* Writer: pointers */
873 if (!partial)
874 partial = chain + k-1;
875 /*
876 * If the branch acquired continuation since we've looked at it -
877 * fine, it should all survive and (new) top doesn't belong to us.
878 */
879 if (!partial->key && *partial->p)
880 /* Writer: end */
881 goto no_top;
882 for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
883 ;
884 /*
885 * OK, we've found the last block that must survive. The rest of our
886 * branch should be detached before unlocking. However, if that rest
887 * of branch is all ours and does not grow immediately from the inode
888 * it's easier to cheat and just decrement partial->p.
889 */
890 if (p == chain + k - 1 && p > chain) {
891 p->p--;
892 } else {
893 *top = *p->p;
894 /* Nope, don't do this in ext4. Must leave the tree intact */
895#if 0
896 *p->p = 0;
897#endif
898 }
899 /* Writer: end */
900
901 while (partial > p) {
902 brelse(partial->bh);
903 partial--;
904 }
905no_top:
906 return partial;
907}
908
909/*
910 * Zero a number of block pointers in either an inode or an indirect block.
911 * If we restart the transaction we must again get write access to the
912 * indirect block for further modification.
913 *
914 * We release `count' blocks on disk, but (last - first) may be greater
915 * than `count' because there can be holes in there.
916 *
917 * Return 0 on success, 1 on invalid block range
918 * and < 0 on fatal error.
919 */
920static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
921 struct buffer_head *bh,
922 ext4_fsblk_t block_to_free,
923 unsigned long count, __le32 *first,
924 __le32 *last)
925{
926 __le32 *p;
927 int flags = EXT4_FREE_BLOCKS_VALIDATED;
928 int err;
929
930 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
931 flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
932 else if (ext4_should_journal_data(inode))
933 flags |= EXT4_FREE_BLOCKS_FORGET;
934
935 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
936 count)) {
937 EXT4_ERROR_INODE(inode, "attempt to clear invalid "
938 "blocks %llu len %lu",
939 (unsigned long long) block_to_free, count);
940 return 1;
941 }
942
943 if (try_to_extend_transaction(handle, inode)) {
944 if (bh) {
945 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
946 err = ext4_handle_dirty_metadata(handle, inode, bh);
947 if (unlikely(err))
948 goto out_err;
949 }
950 err = ext4_mark_inode_dirty(handle, inode);
951 if (unlikely(err))
952 goto out_err;
953 err = ext4_truncate_restart_trans(handle, inode,
954 ext4_blocks_for_truncate(inode));
955 if (unlikely(err))
956 goto out_err;
957 if (bh) {
958 BUFFER_TRACE(bh, "retaking write access");
959 err = ext4_journal_get_write_access(handle, bh);
960 if (unlikely(err))
961 goto out_err;
962 }
963 }
964
965 for (p = first; p < last; p++)
966 *p = 0;
967
968 ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
969 return 0;
970out_err:
971 ext4_std_error(inode->i_sb, err);
972 return err;
973}
974
975/**
976 * ext4_free_data - free a list of data blocks
977 * @handle: handle for this transaction
978 * @inode: inode we are dealing with
979 * @this_bh: indirect buffer_head which contains *@first and *@last
980 * @first: array of block numbers
981 * @last: points immediately past the end of array
982 *
983 * We are freeing all blocks referred from that array (numbers are stored as
984 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
985 *
986 * We accumulate contiguous runs of blocks to free. Conveniently, if these
987 * blocks are contiguous then releasing them at one time will only affect one
988 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
989 * actually use a lot of journal space.
990 *
991 * @this_bh will be %NULL if @first and @last point into the inode's direct
992 * block pointers.
993 */
994static void ext4_free_data(handle_t *handle, struct inode *inode,
995 struct buffer_head *this_bh,
996 __le32 *first, __le32 *last)
997{
998 ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */
999 unsigned long count = 0; /* Number of blocks in the run */
1000 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
1001 corresponding to
1002 block_to_free */
1003 ext4_fsblk_t nr; /* Current block # */
1004 __le32 *p; /* Pointer into inode/ind
1005 for current block */
1006 int err = 0;
1007
1008 if (this_bh) { /* For indirect block */
1009 BUFFER_TRACE(this_bh, "get_write_access");
1010 err = ext4_journal_get_write_access(handle, this_bh);
1011 /* Important: if we can't update the indirect pointers
1012 * to the blocks, we can't free them. */
1013 if (err)
1014 return;
1015 }
1016
1017 for (p = first; p < last; p++) {
1018 nr = le32_to_cpu(*p);
1019 if (nr) {
1020 /* accumulate blocks to free if they're contiguous */
1021 if (count == 0) {
1022 block_to_free = nr;
1023 block_to_free_p = p;
1024 count = 1;
1025 } else if (nr == block_to_free + count) {
1026 count++;
1027 } else {
1028 err = ext4_clear_blocks(handle, inode, this_bh,
1029 block_to_free, count,
1030 block_to_free_p, p);
1031 if (err)
1032 break;
1033 block_to_free = nr;
1034 block_to_free_p = p;
1035 count = 1;
1036 }
1037 }
1038 }
1039
1040 if (!err && count > 0)
1041 err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
1042 count, block_to_free_p, p);
1043 if (err < 0)
1044 /* fatal error */
1045 return;
1046
1047 if (this_bh) {
1048 BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
1049
1050 /*
1051 * The buffer head should have an attached journal head at this
1052 * point. However, if the data is corrupted and an indirect
1053 * block pointed to itself, it would have been detached when
1054 * the block was cleared. Check for this instead of OOPSing.
1055 */
1056 if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
1057 ext4_handle_dirty_metadata(handle, inode, this_bh);
1058 else
1059 EXT4_ERROR_INODE(inode,
1060 "circular indirect block detected at "
1061 "block %llu",
1062 (unsigned long long) this_bh->b_blocknr);
1063 }
1064}
1065
1066/**
1067 * ext4_free_branches - free an array of branches
1068 * @handle: JBD handle for this transaction
1069 * @inode: inode we are dealing with
1070 * @parent_bh: the buffer_head which contains *@first and *@last
1071 * @first: array of block numbers
1072 * @last: pointer immediately past the end of array
1073 * @depth: depth of the branches to free
1074 *
1075 * We are freeing all blocks referred from these branches (numbers are
1076 * stored as little-endian 32-bit) and updating @inode->i_blocks
1077 * appropriately.
1078 */
1079static void ext4_free_branches(handle_t *handle, struct inode *inode,
1080 struct buffer_head *parent_bh,
1081 __le32 *first, __le32 *last, int depth)
1082{
1083 ext4_fsblk_t nr;
1084 __le32 *p;
1085
1086 if (ext4_handle_is_aborted(handle))
1087 return;
1088
1089 if (depth--) {
1090 struct buffer_head *bh;
1091 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
1092 p = last;
1093 while (--p >= first) {
1094 nr = le32_to_cpu(*p);
1095 if (!nr)
1096 continue; /* A hole */
1097
1098 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
1099 nr, 1)) {
1100 EXT4_ERROR_INODE(inode,
1101 "invalid indirect mapped "
1102 "block %lu (level %d)",
1103 (unsigned long) nr, depth);
1104 break;
1105 }
1106
1107 /* Go read the buffer for the next level down */
1108 bh = sb_bread(inode->i_sb, nr);
1109
1110 /*
1111 * A read failure? Report error and clear slot
1112 * (should be rare).
1113 */
1114 if (!bh) {
1115 EXT4_ERROR_INODE_BLOCK(inode, nr,
1116 "Read failure");
1117 continue;
1118 }
1119
1120 /* This zaps the entire block. Bottom up. */
1121 BUFFER_TRACE(bh, "free child branches");
1122 ext4_free_branches(handle, inode, bh,
1123 (__le32 *) bh->b_data,
1124 (__le32 *) bh->b_data + addr_per_block,
1125 depth);
1126 brelse(bh);
1127
1128 /*
1129 * Everything below this pointer has been
1130 * released. Now let this top-of-subtree go.
1131 *
1132 * We want the freeing of this indirect block to be
1133 * atomic in the journal with the updating of the
1134 * bitmap block which owns it. So make some room in
1135 * the journal.
1136 *
1137 * We zero the parent pointer *after* freeing its
1138 * pointee in the bitmaps, so if extend_transaction()
1139 * for some reason fails to put the bitmap changes and
1140 * the release into the same transaction, recovery
1141 * will merely complain about releasing a free block,
1142 * rather than leaking blocks.
1143 */
1144 if (ext4_handle_is_aborted(handle))
1145 return;
1146 if (try_to_extend_transaction(handle, inode)) {
1147 ext4_mark_inode_dirty(handle, inode);
1148 ext4_truncate_restart_trans(handle, inode,
1149 ext4_blocks_for_truncate(inode));
1150 }
1151
1152 /*
1153 * The forget flag here is critical because if
1154 * we are journaling (and not doing data
1155 * journaling), we have to make sure a revoke
1156 * record is written to prevent the journal
1157 * replay from overwriting the (former)
1158 * indirect block if it gets reallocated as a
1159 * data block. This must happen in the same
1160 * transaction where the data blocks are
1161 * actually freed.
1162 */
1163 ext4_free_blocks(handle, inode, NULL, nr, 1,
1164 EXT4_FREE_BLOCKS_METADATA|
1165 EXT4_FREE_BLOCKS_FORGET);
1166
1167 if (parent_bh) {
1168 /*
1169 * The block which we have just freed is
1170 * pointed to by an indirect block: journal it
1171 */
1172 BUFFER_TRACE(parent_bh, "get_write_access");
1173 if (!ext4_journal_get_write_access(handle,
1174 parent_bh)){
1175 *p = 0;
1176 BUFFER_TRACE(parent_bh,
1177 "call ext4_handle_dirty_metadata");
1178 ext4_handle_dirty_metadata(handle,
1179 inode,
1180 parent_bh);
1181 }
1182 }
1183 }
1184 } else {
1185 /* We have reached the bottom of the tree. */
1186 BUFFER_TRACE(parent_bh, "free data blocks");
1187 ext4_free_data(handle, inode, parent_bh, first, last);
1188 }
1189}
1190
1191void ext4_ind_truncate(handle_t *handle, struct inode *inode)
1192{
1193 struct ext4_inode_info *ei = EXT4_I(inode);
1194 __le32 *i_data = ei->i_data;
1195 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
1196 ext4_lblk_t offsets[4];
1197 Indirect chain[4];
1198 Indirect *partial;
1199 __le32 nr = 0;
1200 int n = 0;
1201 ext4_lblk_t last_block, max_block;
1202 unsigned blocksize = inode->i_sb->s_blocksize;
1203
1204 last_block = (inode->i_size + blocksize-1)
1205 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
1206 max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
1207 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
1208
1209 if (last_block != max_block) {
1210 n = ext4_block_to_path(inode, last_block, offsets, NULL);
1211 if (n == 0)
1212 return;
1213 }
1214
1215 ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);
1216
1217 /*
1218 * The orphan list entry will now protect us from any crash which
1219 * occurs before the truncate completes, so it is now safe to propagate
1220 * the new, shorter inode size (held for now in i_size) into the
1221 * on-disk inode. We do this via i_disksize, which is the value which
1222 * ext4 *really* writes onto the disk inode.
1223 */
1224 ei->i_disksize = inode->i_size;
1225
1226 if (last_block == max_block) {
1227 /*
1228 * It is unnecessary to free any data blocks if last_block is
1229 * equal to the indirect block limit.
1230 */
1231 return;
1232 } else if (n == 1) { /* direct blocks */
1233 ext4_free_data(handle, inode, NULL, i_data+offsets[0],
1234 i_data + EXT4_NDIR_BLOCKS);
1235 goto do_indirects;
1236 }
1237
1238 partial = ext4_find_shared(inode, n, offsets, chain, &nr);
1239 /* Kill the top of shared branch (not detached) */
1240 if (nr) {
1241 if (partial == chain) {
1242 /* Shared branch grows from the inode */
1243 ext4_free_branches(handle, inode, NULL,
1244 &nr, &nr+1, (chain+n-1) - partial);
1245 *partial->p = 0;
1246 /*
1247 * We mark the inode dirty prior to restart,
1248 * and prior to stop. No need for it here.
1249 */
1250 } else {
1251 /* Shared branch grows from an indirect block */
1252 BUFFER_TRACE(partial->bh, "get_write_access");
1253 ext4_free_branches(handle, inode, partial->bh,
1254 partial->p,
1255 partial->p+1, (chain+n-1) - partial);
1256 }
1257 }
1258 /* Clear the ends of indirect blocks on the shared branch */
1259 while (partial > chain) {
1260 ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
1261 (__le32*)partial->bh->b_data+addr_per_block,
1262 (chain+n-1) - partial);
1263 BUFFER_TRACE(partial->bh, "call brelse");
1264 brelse(partial->bh);
1265 partial--;
1266 }
1267do_indirects:
1268 /* Kill the remaining (whole) subtrees */
1269 switch (offsets[0]) {
1270 default:
1271 nr = i_data[EXT4_IND_BLOCK];
1272 if (nr) {
1273 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
1274 i_data[EXT4_IND_BLOCK] = 0;
1275 }
1276 case EXT4_IND_BLOCK:
1277 nr = i_data[EXT4_DIND_BLOCK];
1278 if (nr) {
1279 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
1280 i_data[EXT4_DIND_BLOCK] = 0;
1281 }
1282 case EXT4_DIND_BLOCK:
1283 nr = i_data[EXT4_TIND_BLOCK];
1284 if (nr) {
1285 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
1286 i_data[EXT4_TIND_BLOCK] = 0;
1287 }
1288 case EXT4_TIND_BLOCK:
1289 ;
1290 }
1291}
1292
1293static int free_hole_blocks(handle_t *handle, struct inode *inode,
1294 struct buffer_head *parent_bh, __le32 *i_data,
1295 int level, ext4_lblk_t first,
1296 ext4_lblk_t count, int max)
1297{
1298 struct buffer_head *bh = NULL;
1299 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
1300 int ret = 0;
1301 int i, inc;
1302 ext4_lblk_t offset;
1303 __le32 blk;
1304
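/*
 * Each pointer at the current level covers "inc" logical blocks: with 4KiB
 * blocks that is 1 block at level 0, 1024 at level 1 and 1024 * 1024 at
 * level 2 (EXT4_BLOCK_SIZE_BITS(sb) - 2 == 10 in that case).
 */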
1305 inc = 1 << ((EXT4_BLOCK_SIZE_BITS(inode->i_sb) - 2) * level);
1306 for (i = 0, offset = 0; i < max; i++, i_data++, offset += inc) {
1307 if (offset >= count + first)
1308 break;
1309 if (*i_data == 0 || (offset + inc) <= first)
1310 continue;
1311 blk = *i_data;
1312 if (level > 0) {
1313 ext4_lblk_t first2;
1314 bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
1315 if (!bh) {
1316 EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
1317 "Read failure");
1318 return -EIO;
1319 }
1320 first2 = (first > offset) ? first - offset : 0;
1321 ret = free_hole_blocks(handle, inode, bh,
1322 (__le32 *)bh->b_data, level - 1,
1323 first2, count - offset,
1324 inode->i_sb->s_blocksize >> 2);
1325 if (ret) {
1326 brelse(bh);
1327 goto err;
1328 }
1329 }
1330 if (level == 0 ||
1331 (bh && all_zeroes((__le32 *)bh->b_data,
1332 (__le32 *)bh->b_data + addr_per_block))) {
1333 ext4_free_data(handle, inode, parent_bh, &blk, &blk+1);
1334 *i_data = 0;
1335 }
1336 brelse(bh);
1337 bh = NULL;
1338 }
1339
1340err:
1341 return ret;
1342}
1343
1344int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
1345 ext4_lblk_t first, ext4_lblk_t stop)
1346{
1347 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
1348 int level, ret = 0;
1349 int num = EXT4_NDIR_BLOCKS;
1350 ext4_lblk_t count, max = EXT4_NDIR_BLOCKS;
1351 __le32 *i_data = EXT4_I(inode)->i_data;
1352
1353 count = stop - first;
1354 for (level = 0; level < 4; level++, max *= addr_per_block) {
1355 if (first < max) {
1356 ret = free_hole_blocks(handle, inode, NULL, i_data,
1357 level, first, count, num);
1358 if (ret)
1359 goto err;
1360 if (count > max - first)
1361 count -= max - first;
1362 else
1363 break;
1364 first = 0;
1365 } else {
1366 first -= max;
1367 }
1368 i_data += num;
1369 if (level == 0) {
1370 num = 1;
1371 max = 1;
1372 }
1373 }
1374
1375err:
1376 return ret;
1377}
1378