ext4: fix incorrect block reservation on quota transfer.
[deliverable/linux.git] / fs / ext4 / inode.c
1 /*
2 * linux/fs/ext4/inode.c
3 *
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
8 *
9 * from
10 *
11 * linux/fs/minix/inode.c
12 *
13 * Copyright (C) 1991, 1992 Linus Torvalds
14 *
15 * Goal-directed block allocation by Stephen Tweedie
16 * (sct@redhat.com), 1993, 1998
17 * Big-endian to little-endian byte-swapping/bitmaps by
18 * David S. Miller (davem@caip.rutgers.edu), 1995
19 * 64-bit file support on 64-bit platforms by Jakub Jelinek
20 * (jj@sunsite.ms.mff.cuni.cz)
21 *
22 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
23 */
24
25 #include <linux/module.h>
26 #include <linux/fs.h>
27 #include <linux/time.h>
28 #include <linux/jbd2.h>
29 #include <linux/highuid.h>
30 #include <linux/pagemap.h>
31 #include <linux/quotaops.h>
32 #include <linux/string.h>
33 #include <linux/buffer_head.h>
34 #include <linux/writeback.h>
35 #include <linux/pagevec.h>
36 #include <linux/mpage.h>
37 #include <linux/namei.h>
38 #include <linux/uio.h>
39 #include <linux/bio.h>
40 #include <linux/workqueue.h>
41
42 #include "ext4_jbd2.h"
43 #include "xattr.h"
44 #include "acl.h"
45 #include "ext4_extents.h"
46
47 #include <trace/events/ext4.h>
48
49 #define MPAGE_DA_EXTENT_TAIL 0x01
50
51 static inline int ext4_begin_ordered_truncate(struct inode *inode,
52 loff_t new_size)
53 {
54 return jbd2_journal_begin_ordered_truncate(
55 EXT4_SB(inode->i_sb)->s_journal,
56 &EXT4_I(inode)->jinode,
57 new_size);
58 }
59
60 static void ext4_invalidatepage(struct page *page, unsigned long offset);
61
62 /*
63 * Test whether an inode is a fast symlink.
64 */
65 static int ext4_inode_is_fast_symlink(struct inode *inode)
66 {
67 int ea_blocks = EXT4_I(inode)->i_file_acl ?
68 (inode->i_sb->s_blocksize >> 9) : 0;
69
70 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
71 }
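
/*
 * Worked example of the check above (illustrative, assuming a 4KB
 * block size): i_blocks counts 512-byte sectors, so an xattr block
 * accounts for 4096 >> 9 == 8 sectors. A symlink whose target lives
 * entirely in i_data has i_blocks == 0 (or == 8 if it also owns an
 * xattr block) and is therefore treated as a fast symlink.
 */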
72
73 /*
74 * Work out how many blocks we need to proceed with the next chunk of a
75 * truncate transaction.
76 */
77 static unsigned long blocks_for_truncate(struct inode *inode)
78 {
79 ext4_lblk_t needed;
80
81 needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
82
83 /* Give ourselves just enough room to cope with inodes in which
84 * i_blocks is corrupt: we've seen disk corruptions in the past
85 * which resulted in random data in an inode which looked enough
86 * like a regular file for ext4 to try to delete it. Things
87 * will go a bit crazy if that happens, but at least we should
88 * try not to panic the whole kernel. */
89 if (needed < 2)
90 needed = 2;
91
92 /* But we need to bound the transaction so we don't overflow the
93 * journal. */
94 if (needed > EXT4_MAX_TRANS_DATA)
95 needed = EXT4_MAX_TRANS_DATA;
96
97 return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
98 }
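
/*
 * Worked example of the estimate above (illustrative numbers,
 * assuming a 4KB block size, so s_blocksize_bits == 12): an inode
 * with i_blocks == 80 (512-byte sectors) spans 80 >> (12 - 9) == 10
 * fs blocks, so needed == 10 and the truncate handle is started with
 * EXT4_DATA_TRANS_BLOCKS(sb) + 10 credits; needed is always clamped
 * to the range [2, EXT4_MAX_TRANS_DATA].
 */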
99
100 /*
101 * Truncate transactions can be complex and absolutely huge. So we need to
102 * be able to restart the transaction at a convenient checkpoint to make
103 * sure we don't overflow the journal.
104 *
105 * start_transaction gets us a new handle for a truncate transaction,
106 * and extend_transaction tries to extend the existing one a bit. If
107 * extend fails, we need to propagate the failure up and restart the
108 * transaction in the top-level truncate loop. --sct
109 */
110 static handle_t *start_transaction(struct inode *inode)
111 {
112 handle_t *result;
113
114 result = ext4_journal_start(inode, blocks_for_truncate(inode));
115 if (!IS_ERR(result))
116 return result;
117
118 ext4_std_error(inode->i_sb, PTR_ERR(result));
119 return result;
120 }
121
122 /*
123 * Try to extend this transaction for the purposes of truncation.
124 *
125 * Returns 0 if we managed to create more room. If we can't create more
126 * room, and the transaction must be restarted, we return 1.
127 */
128 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
129 {
130 if (!ext4_handle_valid(handle))
131 return 0;
132 if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
133 return 0;
134 if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
135 return 0;
136 return 1;
137 }
138
139 /*
140 * Restart the transaction associated with *handle. This does a commit,
141 * so before we call here everything must be consistently dirtied against
142 * this transaction.
143 */
144 int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
145 int nblocks)
146 {
147 int ret;
148
149 /*
150 * Drop i_data_sem to avoid deadlock with ext4_get_blocks. At this
151 * moment, get_block can be called only for blocks inside i_size since
152 * the page cache has already been dropped and writes are blocked by
153 * i_mutex. So we can safely drop the i_data_sem here.
154 */
155 BUG_ON(EXT4_JOURNAL(inode) == NULL);
156 jbd_debug(2, "restarting handle %p\n", handle);
157 up_write(&EXT4_I(inode)->i_data_sem);
158 ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
159 down_write(&EXT4_I(inode)->i_data_sem);
160 ext4_discard_preallocations(inode);
161
162 return ret;
163 }
164
165 /*
166 * Called at the last iput() if i_nlink is zero.
167 */
168 void ext4_delete_inode(struct inode *inode)
169 {
170 handle_t *handle;
171 int err;
172
173 if (ext4_should_order_data(inode))
174 ext4_begin_ordered_truncate(inode, 0);
175 truncate_inode_pages(&inode->i_data, 0);
176
177 if (is_bad_inode(inode))
178 goto no_delete;
179
180 handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
181 if (IS_ERR(handle)) {
182 ext4_std_error(inode->i_sb, PTR_ERR(handle));
183 /*
184 * If we're going to skip the normal cleanup, we still need to
185 * make sure that the in-core orphan linked list is properly
186 * cleaned up.
187 */
188 ext4_orphan_del(NULL, inode);
189 goto no_delete;
190 }
191
192 if (IS_SYNC(inode))
193 ext4_handle_sync(handle);
194 inode->i_size = 0;
195 err = ext4_mark_inode_dirty(handle, inode);
196 if (err) {
197 ext4_warning(inode->i_sb, __func__,
198 "couldn't mark inode dirty (err %d)", err);
199 goto stop_handle;
200 }
201 if (inode->i_blocks)
202 ext4_truncate(inode);
203
204 /*
205 * ext4_ext_truncate() doesn't reserve any slop when it
206 * restarts journal transactions; therefore there may not be
207 * enough credits left in the handle to remove the inode from
208 * the orphan list and set the dtime field.
209 */
210 if (!ext4_handle_has_enough_credits(handle, 3)) {
211 err = ext4_journal_extend(handle, 3);
212 if (err > 0)
213 err = ext4_journal_restart(handle, 3);
214 if (err != 0) {
215 ext4_warning(inode->i_sb, __func__,
216 "couldn't extend journal (err %d)", err);
217 stop_handle:
218 ext4_journal_stop(handle);
219 goto no_delete;
220 }
221 }
222
223 /*
224 * Kill off the orphan record which ext4_truncate created.
225 * AKPM: I think this can be inside the above `if'.
226 * Note that ext4_orphan_del() has to be able to cope with the
227 * deletion of a non-existent orphan - this is because we don't
228 * know if ext4_truncate() actually created an orphan record.
229 * (Well, we could do this if we need to, but heck - it works)
230 */
231 ext4_orphan_del(handle, inode);
232 EXT4_I(inode)->i_dtime = get_seconds();
233
234 /*
235 * One subtle ordering requirement: if anything has gone wrong
236 * (transaction abort, IO errors, whatever), then we can still
237 * do these next steps (the fs will already have been marked as
238 * having errors), but we can't free the inode if the mark_dirty
239 * fails.
240 */
241 if (ext4_mark_inode_dirty(handle, inode))
242 /* If that failed, just do the required in-core inode clear. */
243 clear_inode(inode);
244 else
245 ext4_free_inode(handle, inode);
246 ext4_journal_stop(handle);
247 return;
248 no_delete:
249 clear_inode(inode); /* We must guarantee clearing of inode... */
250 }
251
252 typedef struct {
253 __le32 *p;
254 __le32 key;
255 struct buffer_head *bh;
256 } Indirect;
257
258 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
259 {
260 p->key = *(p->p = v);
261 p->bh = bh;
262 }
263
264 /**
265 * ext4_block_to_path - parse the block number into array of offsets
266 * @inode: inode in question (we are only interested in its superblock)
267 * @i_block: block number to be parsed
268 * @offsets: array to store the offsets in
269 * @boundary: set this non-zero if the referred-to block is likely to be
270 * followed (on disk) by an indirect block.
271 *
272 * To store the locations of a file's data ext4 uses a data structure
273 * common for UNIX filesystems - a tree of pointers anchored in the inode,
274 * with data blocks at the leaves and indirect blocks in intermediate nodes.
275 * This function translates the block number into a path in that tree -
276 * the return value is the path length and @offsets[n] is the offset of the
277 * pointer to the (n+1)th node in the nth one. If @block is out of range
278 * (negative or too large), a warning is printed and zero is returned.
279 *
280 * Note: function doesn't find node addresses, so no IO is needed. All
281 * we need to know is the capacity of indirect blocks (taken from the
282 * inode->i_sb).
283 */
284
285 /*
286 * Portability note: the last comparison (check that we fit into triple
287 * indirect block) is spelled differently, because otherwise on an
288 * architecture with 32-bit longs and 8Kb pages we might get into trouble
289 * if our filesystem had 8Kb blocks. We might use long long, but that would
290 * kill us on x86. Oh, well, at least the sign propagation does not matter -
291 * i_block would have to be negative in the very beginning, so we would not
292 * get there at all.
293 */
294
295 static int ext4_block_to_path(struct inode *inode,
296 ext4_lblk_t i_block,
297 ext4_lblk_t offsets[4], int *boundary)
298 {
299 int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
300 int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
301 const long direct_blocks = EXT4_NDIR_BLOCKS,
302 indirect_blocks = ptrs,
303 double_blocks = (1 << (ptrs_bits * 2));
304 int n = 0;
305 int final = 0;
306
307 if (i_block < direct_blocks) {
308 offsets[n++] = i_block;
309 final = direct_blocks;
310 } else if ((i_block -= direct_blocks) < indirect_blocks) {
311 offsets[n++] = EXT4_IND_BLOCK;
312 offsets[n++] = i_block;
313 final = ptrs;
314 } else if ((i_block -= indirect_blocks) < double_blocks) {
315 offsets[n++] = EXT4_DIND_BLOCK;
316 offsets[n++] = i_block >> ptrs_bits;
317 offsets[n++] = i_block & (ptrs - 1);
318 final = ptrs;
319 } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
320 offsets[n++] = EXT4_TIND_BLOCK;
321 offsets[n++] = i_block >> (ptrs_bits * 2);
322 offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
323 offsets[n++] = i_block & (ptrs - 1);
324 final = ptrs;
325 } else {
326 ext4_warning(inode->i_sb, "ext4_block_to_path",
327 "block %lu > max in inode %lu",
328 i_block + direct_blocks +
329 indirect_blocks + double_blocks, inode->i_ino);
330 }
331 if (boundary)
332 *boundary = final - 1 - (i_block & (ptrs - 1));
333 return n;
334 }
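
/*
 * Worked examples of the path computation above (illustrative,
 * assuming a 4KB block size, i.e. ptrs == 1024 and ptrs_bits == 10):
 *
 *	i_block = 5    -> depth 1, offsets = {5}	(direct)
 *	i_block = 112  -> depth 2, offsets = {12, 100}	(indirect:
 *			  EXT4_IND_BLOCK, then 112 - 12 = 100 < 1024)
 *	i_block = 6036 -> depth 3, offsets = {13, 4, 904} (double
 *			  indirect: EXT4_DIND_BLOCK, then with
 *			  6036 - 12 - 1024 = 5000 remaining,
 *			  5000 >> 10 = 4 and 5000 & 1023 = 904)
 */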
335
336 static int __ext4_check_blockref(const char *function, struct inode *inode,
337 __le32 *p, unsigned int max)
338 {
339 __le32 *bref = p;
340 unsigned int blk;
341
342 while (bref < p+max) {
343 blk = le32_to_cpu(*bref++);
344 if (blk &&
345 unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
346 blk, 1))) {
347 ext4_error(inode->i_sb, function,
348 "invalid block reference %u "
349 "in inode #%lu", blk, inode->i_ino);
350 return -EIO;
351 }
352 }
353 return 0;
354 }
355
356
357 #define ext4_check_indirect_blockref(inode, bh) \
358 __ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data, \
359 EXT4_ADDR_PER_BLOCK((inode)->i_sb))
360
361 #define ext4_check_inode_blockref(inode) \
362 __ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data, \
363 EXT4_NDIR_BLOCKS)
364
365 /**
366 * ext4_get_branch - read the chain of indirect blocks leading to data
367 * @inode: inode in question
368 * @depth: depth of the chain (1 - direct pointer, etc.)
369 * @offsets: offsets of pointers in inode/indirect blocks
370 * @chain: place to store the result
371 * @err: here we store the error value
372 *
373 * Function fills the array of triples <key, p, bh> and returns %NULL
374 * if everything went OK or the pointer to the last filled triple
375 * (incomplete one) otherwise. Upon the return chain[i].key contains
376 * the number of (i+1)-th block in the chain (as it is stored in memory,
377 * i.e. little-endian 32-bit), chain[i].p contains the address of that
378 * number (it points into struct inode for i==0 and into the bh->b_data
379 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
380 * block for i>0 and NULL for i==0. In other words, it holds the block
381 * numbers of the chain, addresses they were taken from (and where we can
382 * verify that chain did not change) and buffer_heads hosting these
383 * numbers.
384 *
385 * Function stops when it stumbles upon zero pointer (absent block)
386 * (pointer to last triple returned, *@err == 0)
387 * or when it gets an IO error reading an indirect block
388 * (ditto, *@err == -EIO)
389 * or when it reads all @depth-1 indirect blocks successfully and finds
390 * the whole chain, all way to the data (returns %NULL, *err == 0).
391 *
392 * Needs to be called with
393 * down_read(&EXT4_I(inode)->i_data_sem)
394 */
395 static Indirect *ext4_get_branch(struct inode *inode, int depth,
396 ext4_lblk_t *offsets,
397 Indirect chain[4], int *err)
398 {
399 struct super_block *sb = inode->i_sb;
400 Indirect *p = chain;
401 struct buffer_head *bh;
402
403 *err = 0;
404 /* i_data is not going away, no lock needed */
405 add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
406 if (!p->key)
407 goto no_block;
408 while (--depth) {
409 bh = sb_getblk(sb, le32_to_cpu(p->key));
410 if (unlikely(!bh))
411 goto failure;
412
413 if (!bh_uptodate_or_lock(bh)) {
414 if (bh_submit_read(bh) < 0) {
415 put_bh(bh);
416 goto failure;
417 }
418 /* validate block references */
419 if (ext4_check_indirect_blockref(inode, bh)) {
420 put_bh(bh);
421 goto failure;
422 }
423 }
424
425 add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
426 /* Reader: end */
427 if (!p->key)
428 goto no_block;
429 }
430 return NULL;
431
432 failure:
433 *err = -EIO;
434 no_block:
435 return p;
436 }
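
/*
 * A minimal sketch of how the chain filled in above is consumed; it
 * mirrors the lookup/cleanup pattern of ext4_ind_get_blocks() below
 * (variable names are illustrative):
 *
 *	partial = ext4_get_branch(inode, depth, offsets, chain, &err);
 *	if (!partial) {				(fully mapped)
 *		blk = le32_to_cpu(chain[depth - 1].key);
 *		partial = chain + depth - 1;	(release whole chain)
 *	}
 *	while (partial > chain) {		(drop indirect bhs)
 *		brelse(partial->bh);
 *		partial--;
 *	}
 */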
437
438 /**
439 * ext4_find_near - find a place for allocation with sufficient locality
440 * @inode: owner
441 * @ind: descriptor of indirect block.
442 *
443 * This function returns the preferred place for block allocation.
444 * It is used when the heuristic for sequential allocation fails.
445 * Rules are:
446 * + if there is a block to the left of our position - allocate near it.
447 * + if the pointer will live in an indirect block - allocate near that block.
448 * + if the pointer will live in the inode - allocate in the same
449 * cylinder group.
450 *
451 * In the latter case we colour the starting block by the caller's PID to
452 * prevent it from clashing with concurrent allocations for a different inode
453 * in the same block group. The PID is used here so that functionally related
454 * files will be close-by on-disk.
455 *
456 * Caller must make sure that @ind is valid and will stay that way.
457 */
458 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
459 {
460 struct ext4_inode_info *ei = EXT4_I(inode);
461 __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
462 __le32 *p;
463 ext4_fsblk_t bg_start;
464 ext4_fsblk_t last_block;
465 ext4_grpblk_t colour;
466 ext4_group_t block_group;
467 int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
468
469 /* Try to find previous block */
470 for (p = ind->p - 1; p >= start; p--) {
471 if (*p)
472 return le32_to_cpu(*p);
473 }
474
475 /* No such thing, so let's try location of indirect block */
476 if (ind->bh)
477 return ind->bh->b_blocknr;
478
479 /*
480 * Is it going to be referred to from the inode itself? OK, just put it
481 * into the same cylinder group then.
482 */
483 block_group = ei->i_block_group;
484 if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
485 block_group &= ~(flex_size-1);
486 if (S_ISREG(inode->i_mode))
487 block_group++;
488 }
489 bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
490 last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
491
492 /*
493 * If we are doing delayed allocation, we don't need to take
494 * colour into account.
495 */
496 if (test_opt(inode->i_sb, DELALLOC))
497 return bg_start;
498
499 if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
500 colour = (current->pid % 16) *
501 (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
502 else
503 colour = (current->pid % 16) * ((last_block - bg_start) / 16);
504 return bg_start + colour;
505 }
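
/*
 * Example of the PID colouring above (illustrative numbers): with
 * 32768 blocks per group, a task with current->pid % 16 == 3 gets
 * colour = 3 * (32768 / 16) = 6144, so its allocations start 6144
 * blocks past bg_start, away from writers with a different pid % 16.
 */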
506
507 /**
508 * ext4_find_goal - find a preferred place for allocation.
509 * @inode: owner
510 * @block: block we want
511 * @partial: pointer to the last triple within a chain
512 *
513 * Normally this function finds the preferred place for block allocation
514 * and returns it.
515 * Because this is only used for non-extent files, we limit the block nr
516 * to 32 bits.
517 */
518 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
519 Indirect *partial)
520 {
521 ext4_fsblk_t goal;
522
523 /*
524 * XXX need to get goal block from mballoc's data structures
525 */
526
527 goal = ext4_find_near(inode, partial);
528 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
529 return goal;
530 }
531
532 /**
533 * ext4_blks_to_allocate: Look up the block map and count the number
534 * of direct blocks that need to be allocated for the given branch.
535 *
536 * @branch: chain of indirect blocks
537 * @k: number of blocks needed for indirect blocks
538 * @blks: number of data blocks to be mapped.
539 * @blocks_to_boundary: the offset in the indirect block
540 *
541 * return the total number of blocks to be allocated, including the
542 * direct and indirect blocks.
543 */
544 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
545 int blocks_to_boundary)
546 {
547 unsigned int count = 0;
548
549 /*
550 * Simple case: if the [t,d]indirect block(s) have not been allocated yet,
551 * then it's clear the blocks on that path have not been allocated either.
552 */
553 if (k > 0) {
554 /* right now we don't handle cross boundary allocation */
555 if (blks < blocks_to_boundary + 1)
556 count += blks;
557 else
558 count += blocks_to_boundary + 1;
559 return count;
560 }
561
562 count++;
563 while (count < blks && count <= blocks_to_boundary &&
564 le32_to_cpu(*(branch[0].p + count)) == 0) {
565 count++;
566 }
567 return count;
568 }
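
/*
 * Example (illustrative): with k > 0 (some [td]indirect block still
 * needs allocating), blks == 10 and blocks_to_boundary == 3, the
 * function returns 4 - it stops at the boundary instead of crossing
 * into the next indirect block, and the caller comes back for the
 * remaining blocks.
 */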
569
570 /**
571 * ext4_alloc_blocks: multiple allocate blocks needed for a branch
572 * @indirect_blks: the number of blocks we need to allocate for indirect
573 * blocks
574 *
575 * @new_blocks: on return it will store the new block numbers for
576 * the indirect blocks(if needed) and the first direct block,
577 * @blks: on return it will store the total number of allocated
578 * direct blocks
579 */
580 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
581 ext4_lblk_t iblock, ext4_fsblk_t goal,
582 int indirect_blks, int blks,
583 ext4_fsblk_t new_blocks[4], int *err)
584 {
585 struct ext4_allocation_request ar;
586 int target, i;
587 unsigned long count = 0, blk_allocated = 0;
588 int index = 0;
589 ext4_fsblk_t current_block = 0;
590 int ret = 0;
591
592 /*
593 * Here we try to allocate the requested multiple blocks at once,
594 * on a best-effort basis.
595 * To build a branch, we should allocate blocks for
596 * the indirect blocks (if not allocated yet), and at least
597 * the first direct block of this branch. That's the
598 * minimum number of blocks we need to allocate (required).
599 */
600 /* first we try to allocate the indirect blocks */
601 target = indirect_blks;
602 while (target > 0) {
603 count = target;
604 /* allocating blocks for indirect blocks and direct blocks */
605 current_block = ext4_new_meta_blocks(handle, inode,
606 goal, &count, err);
607 if (*err)
608 goto failed_out;
609
610 BUG_ON(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS);
611
612 target -= count;
613 /* allocate blocks for indirect blocks */
614 while (index < indirect_blks && count) {
615 new_blocks[index++] = current_block++;
616 count--;
617 }
618 if (count > 0) {
619 /*
620 * save the new block number
621 * for the first direct block
622 */
623 new_blocks[index] = current_block;
624 printk(KERN_INFO "%s returned more blocks than "
625 "requested\n", __func__);
626 WARN_ON(1);
627 break;
628 }
629 }
630
631 target = blks - count;
632 blk_allocated = count;
633 if (!target)
634 goto allocated;
635 /* Now allocate data blocks */
636 memset(&ar, 0, sizeof(ar));
637 ar.inode = inode;
638 ar.goal = goal;
639 ar.len = target;
640 ar.logical = iblock;
641 if (S_ISREG(inode->i_mode))
642 /* enable in-core preallocation only for regular files */
643 ar.flags = EXT4_MB_HINT_DATA;
644
645 current_block = ext4_mb_new_blocks(handle, &ar, err);
646 BUG_ON(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS);
647
648 if (*err && (target == blks)) {
649 /*
650 * if the allocation failed and we didn't allocate
651 * any blocks before
652 */
653 goto failed_out;
654 }
655 if (!*err) {
656 if (target == blks) {
657 /*
658 * save the new block number
659 * for the first direct block
660 */
661 new_blocks[index] = current_block;
662 }
663 blk_allocated += ar.len;
664 }
665 allocated:
666 /* total number of blocks allocated for direct blocks */
667 ret = blk_allocated;
668 *err = 0;
669 return ret;
670 failed_out:
671 for (i = 0; i < index; i++)
672 ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
673 return ret;
674 }
675
676 /**
677 * ext4_alloc_branch - allocate and set up a chain of blocks.
678 * @inode: owner
679 * @indirect_blks: number of allocated indirect blocks
680 * @blks: number of allocated direct blocks
681 * @offsets: offsets (in the blocks) to store the pointers to next.
682 * @branch: place to store the chain in.
683 *
684 * This function allocates blocks, zeroes out all but the last one,
685 * links them into chain and (if we are synchronous) writes them to disk.
686 * In other words, it prepares a branch that can be spliced onto the
687 * inode. It stores the information about that chain in the branch[], in
688 * the same format as ext4_get_branch() would do. We are calling it after
689 * we had read the existing part of chain and partial points to the last
690 * triple of that (one with zero ->key). Upon the exit we have the same
691 * picture as after the successful ext4_get_block(), except that in one
692 * place chain is disconnected - *branch->p is still zero (we did not
693 * set the last link), but branch->key contains the number that should
694 * be placed into *branch->p to fill that gap.
695 *
696 * If allocation fails we free all blocks we've allocated (and forget
697 * their buffer_heads) and return the error value from the failed
698 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
699 * as described above and return 0.
700 */
701 static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
702 ext4_lblk_t iblock, int indirect_blks,
703 int *blks, ext4_fsblk_t goal,
704 ext4_lblk_t *offsets, Indirect *branch)
705 {
706 int blocksize = inode->i_sb->s_blocksize;
707 int i, n = 0;
708 int err = 0;
709 struct buffer_head *bh;
710 int num;
711 ext4_fsblk_t new_blocks[4];
712 ext4_fsblk_t current_block;
713
714 num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
715 *blks, new_blocks, &err);
716 if (err)
717 return err;
718
719 branch[0].key = cpu_to_le32(new_blocks[0]);
720 /*
721 * metadata blocks and data blocks are allocated.
722 */
723 for (n = 1; n <= indirect_blks; n++) {
724 /*
725 * Get buffer_head for parent block, zero it out
726 * and set the pointer to new one, then send
727 * parent to disk.
728 */
729 bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
730 branch[n].bh = bh;
731 lock_buffer(bh);
732 BUFFER_TRACE(bh, "call get_create_access");
733 err = ext4_journal_get_create_access(handle, bh);
734 if (err) {
735 /* Don't brelse(bh) here; it's done in
736 * ext4_journal_forget() below */
737 unlock_buffer(bh);
738 goto failed;
739 }
740
741 memset(bh->b_data, 0, blocksize);
742 branch[n].p = (__le32 *) bh->b_data + offsets[n];
743 branch[n].key = cpu_to_le32(new_blocks[n]);
744 *branch[n].p = branch[n].key;
745 if (n == indirect_blks) {
746 current_block = new_blocks[n];
747 /*
748 * End of chain, update the last new metablock of
749 * the chain to point to the newly allocated
750 * data block numbers
751 */
752 for (i = 1; i < num; i++)
753 *(branch[n].p + i) = cpu_to_le32(++current_block);
754 }
755 BUFFER_TRACE(bh, "marking uptodate");
756 set_buffer_uptodate(bh);
757 unlock_buffer(bh);
758
759 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
760 err = ext4_handle_dirty_metadata(handle, inode, bh);
761 if (err)
762 goto failed;
763 }
764 *blks = num;
765 return err;
766 failed:
767 /* Allocation failed, free what we already allocated */
768 ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0);
769 for (i = 1; i <= n ; i++) {
770 /*
771 * branch[i].bh is newly allocated, so there is no
772 * need to revoke the block, which is why we don't
773 * need to set EXT4_FREE_BLOCKS_METADATA.
774 */
775 ext4_free_blocks(handle, inode, 0, new_blocks[i], 1,
776 EXT4_FREE_BLOCKS_FORGET);
777 }
778 for (i = n+1; i < indirect_blks; i++)
779 ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
780
781 ext4_free_blocks(handle, inode, 0, new_blocks[i], num, 0);
782
783 return err;
784 }
785
786 /**
787 * ext4_splice_branch - splice the allocated branch onto inode.
788 * @inode: owner
789 * @block: (logical) number of block we are adding
790 * @chain: chain of indirect blocks (with a missing link - see
791 * ext4_alloc_branch)
792 * @where: location of missing link
793 * @num: number of indirect blocks we are adding
794 * @blks: number of direct blocks we are adding
795 *
796 * This function fills the missing link and does all housekeeping needed in
797 * inode (->i_blocks, etc.). In case of success we end up with the full
798 * chain to new block and return 0.
799 */
800 static int ext4_splice_branch(handle_t *handle, struct inode *inode,
801 ext4_lblk_t block, Indirect *where, int num,
802 int blks)
803 {
804 int i;
805 int err = 0;
806 ext4_fsblk_t current_block;
807
808 /*
809 * If we're splicing into a [td]indirect block (as opposed to the
810 * inode) then we need to get write access to the [td]indirect block
811 * before the splice.
812 */
813 if (where->bh) {
814 BUFFER_TRACE(where->bh, "get_write_access");
815 err = ext4_journal_get_write_access(handle, where->bh);
816 if (err)
817 goto err_out;
818 }
819 /* That's it */
820
821 *where->p = where->key;
822
823 /*
824 * Update the host buffer_head or inode to point to the just-allocated
825 * direct blocks
826 */
827 if (num == 0 && blks > 1) {
828 current_block = le32_to_cpu(where->key) + 1;
829 for (i = 1; i < blks; i++)
830 *(where->p + i) = cpu_to_le32(current_block++);
831 }
832
833 /* We are done with atomic stuff, now do the rest of housekeeping */
834 /* had we spliced it onto indirect block? */
835 if (where->bh) {
836 /*
837 * If we spliced it onto an indirect block, we haven't
838 * altered the inode. Note however that if it is being spliced
839 * onto an indirect block at the very end of the file (the
840 * file is growing) then we *will* alter the inode to reflect
841 * the new i_size. But that is not done here - it is done in
842 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
843 */
844 jbd_debug(5, "splicing indirect only\n");
845 BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
846 err = ext4_handle_dirty_metadata(handle, inode, where->bh);
847 if (err)
848 goto err_out;
849 } else {
850 /*
851 * OK, we spliced it into the inode itself on a direct block.
852 */
853 ext4_mark_inode_dirty(handle, inode);
854 jbd_debug(5, "splicing direct\n");
855 }
856 return err;
857
858 err_out:
859 for (i = 1; i <= num; i++) {
860 /*
861 * branch[i].bh is newly allocated, so there is no
862 * need to revoke the block, which is why we don't
863 * need to set EXT4_FREE_BLOCKS_METADATA.
864 */
865 ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
866 EXT4_FREE_BLOCKS_FORGET);
867 }
868 ext4_free_blocks(handle, inode, 0, le32_to_cpu(where[num].key),
869 blks, 0);
870
871 return err;
872 }
873
874 /*
875 * The ext4_ind_get_blocks() function handles non-extents inodes
876 * (i.e., using the traditional indirect/double-indirect i_blocks
877 * scheme) for ext4_get_blocks().
878 *
879 * Allocation strategy is simple: if we have to allocate something, we will
880 * have to go the whole way to leaf. So let's do it before attaching anything
881 * to tree, set linkage between the newborn blocks, write them if sync is
882 * required, recheck the path, free and repeat if check fails, otherwise
883 * set the last missing link (that will protect us from any truncate-generated
884 * removals - all blocks on the path are immune now) and possibly force the
885 * write on the parent block.
886 * That has a nice additional property: no special recovery from the failed
887 * allocations is needed - we simply release blocks and do not touch anything
888 * reachable from inode.
889 *
890 * `handle' can be NULL if create == 0.
891 *
892 * return > 0, # of blocks mapped or allocated.
893 * return = 0, if plain lookup failed.
894 * return < 0, error case.
895 *
896 * The ext4_ind_get_blocks() function should be called with
897 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
898 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
899 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
900 * blocks.
901 */
902 static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
903 ext4_lblk_t iblock, unsigned int maxblocks,
904 struct buffer_head *bh_result,
905 int flags)
906 {
907 int err = -EIO;
908 ext4_lblk_t offsets[4];
909 Indirect chain[4];
910 Indirect *partial;
911 ext4_fsblk_t goal;
912 int indirect_blks;
913 int blocks_to_boundary = 0;
914 int depth;
915 int count = 0;
916 ext4_fsblk_t first_block = 0;
917
918 J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
919 J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
920 depth = ext4_block_to_path(inode, iblock, offsets,
921 &blocks_to_boundary);
922
923 if (depth == 0)
924 goto out;
925
926 partial = ext4_get_branch(inode, depth, offsets, chain, &err);
927
928 /* Simplest case - block found, no allocation needed */
929 if (!partial) {
930 first_block = le32_to_cpu(chain[depth - 1].key);
931 clear_buffer_new(bh_result);
932 count++;
933 /* map more blocks */
934 while (count < maxblocks && count <= blocks_to_boundary) {
935 ext4_fsblk_t blk;
936
937 blk = le32_to_cpu(*(chain[depth-1].p + count));
938
939 if (blk == first_block + count)
940 count++;
941 else
942 break;
943 }
944 goto got_it;
945 }
946
947 /* Next simple case - plain lookup or failed read of indirect block */
948 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
949 goto cleanup;
950
951 /*
952 * Okay, we need to do block allocation.
953 */
954 goal = ext4_find_goal(inode, iblock, partial);
955
956 /* the number of blocks we need to allocate for [d,t]indirect blocks */
957 indirect_blks = (chain + depth) - partial - 1;
958
959 /*
960 * Next look up the indirect map to count the total number of
961 * direct blocks to allocate for this branch.
962 */
963 count = ext4_blks_to_allocate(partial, indirect_blks,
964 maxblocks, blocks_to_boundary);
965 /*
966 * Block out ext4_truncate while we alter the tree
967 */
968 err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
969 &count, goal,
970 offsets + (partial - chain), partial);
971
972 /*
973 * The ext4_splice_branch call will free and forget any buffers
974 * on the new chain if there is a failure, but that risks using
975 * up transaction credits, especially for bitmaps where the
976 * credits cannot be returned. Can we handle this somehow? We
977 * may need to return -EAGAIN upwards in the worst case. --sct
978 */
979 if (!err)
980 err = ext4_splice_branch(handle, inode, iblock,
981 partial, indirect_blks, count);
982 if (err)
983 goto cleanup;
984
985 set_buffer_new(bh_result);
986 got_it:
987 map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
988 if (count > blocks_to_boundary)
989 set_buffer_boundary(bh_result);
990 err = count;
991 /* Clean up and exit */
992 partial = chain + depth - 1; /* the whole chain */
993 cleanup:
994 while (partial > chain) {
995 BUFFER_TRACE(partial->bh, "call brelse");
996 brelse(partial->bh);
997 partial--;
998 }
999 BUFFER_TRACE(bh_result, "returned");
1000 out:
1001 return err;
1002 }
1003
1004 qsize_t ext4_get_reserved_space(struct inode *inode)
1005 {
1006 unsigned long long total;
1007
1008 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1009 total = EXT4_I(inode)->i_reserved_data_blocks +
1010 EXT4_I(inode)->i_reserved_meta_blocks;
1011 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1012
1013 return (total << inode->i_blkbits);
1014 }
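
/*
 * (The shift above converts the reserved block count to bytes: with
 * 4KB blocks, i.e. i_blkbits == 12, ten reserved blocks are reported
 * as 10 << 12 == 40960 bytes.)
 */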
1015 /*
1016 * Calculate the number of metadata blocks we need to reserve
1017 * to allocate @blocks for a non-extent-based file
1018 */
1019 static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
1020 {
1021 int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
1022 int ind_blks, dind_blks, tind_blks;
1023
1024 /* number of new indirect blocks needed */
1025 ind_blks = (blocks + icap - 1) / icap;
1026
1027 dind_blks = (ind_blks + icap - 1) / icap;
1028
1029 tind_blks = 1;
1030
1031 return ind_blks + dind_blks + tind_blks;
1032 }
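
/*
 * Worked example (illustrative, 4KB blocks so icap == 1024): for
 * blocks == 3000 the worst case is ind_blks = ceil(3000/1024) = 3,
 * dind_blks = ceil(3/1024) = 1 and tind_blks = 1, i.e. 5 metadata
 * blocks are reserved.
 */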
1033
1034 /*
1035 * Calculate the number of metadata blocks we need to reserve
1036 * to allocate a given number of blocks
1037 */
1038 static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
1039 {
1040 if (!blocks)
1041 return 0;
1042
1043 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
1044 return ext4_ext_calc_metadata_amount(inode, blocks);
1045
1046 return ext4_indirect_calc_metadata_amount(inode, blocks);
1047 }
1048
1049 static void ext4_da_update_reserve_space(struct inode *inode, int used)
1050 {
1051 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1052 int total, mdb, mdb_free;
1053
1054 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1055 /* recalculate the number of metablocks that still need to be reserved */
1056 total = EXT4_I(inode)->i_reserved_data_blocks - used;
1057 mdb = ext4_calc_metadata_amount(inode, total);
1058
1059 /* figure out how many metablocks to release */
1060 BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
1061 mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
1062
1063 if (mdb_free) {
1064 /* Account for allocated meta_blocks */
1065 mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;
1066
1067 /* update fs dirty blocks counter */
1068 percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
1069 EXT4_I(inode)->i_allocated_meta_blocks = 0;
1070 EXT4_I(inode)->i_reserved_meta_blocks = mdb;
1071 }
1072
1073 /* update per-inode reservations */
1074 BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
1075 EXT4_I(inode)->i_reserved_data_blocks -= used;
1076 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1077
1078 /*
1079 * release the over-booked quota reservation for metadata blocks
1080 */
1081 if (mdb_free)
1082 vfs_dq_release_reservation_block(inode, mdb_free);
1083
1084 /*
1085 * If we have done all the pending block allocations and if
1086 * there aren't any writers on the inode, we can discard the
1087 * inode's preallocations.
1088 */
1089 if (!total && (atomic_read(&inode->i_writecount) == 0))
1090 ext4_discard_preallocations(inode);
1091 }
1092
1093 static int check_block_validity(struct inode *inode, const char *msg,
1094 sector_t logical, sector_t phys, int len)
1095 {
1096 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) {
1097 ext4_error(inode->i_sb, msg,
1098 "inode #%lu logical block %llu mapped to %llu "
1099 "(size %d)", inode->i_ino,
1100 (unsigned long long) logical,
1101 (unsigned long long) phys, len);
1102 return -EIO;
1103 }
1104 return 0;
1105 }
1106
1107 /*
1108 * Return the number of contiguous dirty pages in a given inode
1109 * starting at page frame idx.
1110 */
1111 static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
1112 unsigned int max_pages)
1113 {
1114 struct address_space *mapping = inode->i_mapping;
1115 pgoff_t index;
1116 struct pagevec pvec;
1117 pgoff_t num = 0;
1118 int i, nr_pages, done = 0;
1119
1120 if (max_pages == 0)
1121 return 0;
1122 pagevec_init(&pvec, 0);
1123 while (!done) {
1124 index = idx;
1125 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1126 PAGECACHE_TAG_DIRTY,
1127 (pgoff_t)PAGEVEC_SIZE);
1128 if (nr_pages == 0)
1129 break;
1130 for (i = 0; i < nr_pages; i++) {
1131 struct page *page = pvec.pages[i];
1132 struct buffer_head *bh, *head;
1133
1134 lock_page(page);
1135 if (unlikely(page->mapping != mapping) ||
1136 !PageDirty(page) ||
1137 PageWriteback(page) ||
1138 page->index != idx) {
1139 done = 1;
1140 unlock_page(page);
1141 break;
1142 }
1143 if (page_has_buffers(page)) {
1144 bh = head = page_buffers(page);
1145 do {
1146 if (!buffer_delay(bh) &&
1147 !buffer_unwritten(bh))
1148 done = 1;
1149 bh = bh->b_this_page;
1150 } while (!done && (bh != head));
1151 }
1152 unlock_page(page);
1153 if (done)
1154 break;
1155 idx++;
1156 num++;
1157 if (num >= max_pages)
1158 break;
1159 }
1160 pagevec_release(&pvec);
1161 }
1162 return num;
1163 }
1164
1165 /*
1166 * The ext4_get_blocks() function tries to look up the requested blocks,
1167 * and returns if the blocks are already mapped.
1168 *
1169 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
1170 * stores the allocated blocks in the result buffer head and marks it
1171 * mapped.
1172 *
1173 * If the file is extents based, it will call ext4_ext_get_blocks();
1174 * otherwise it calls ext4_ind_get_blocks() to handle indirect-mapping
1175 * based files.
1176 *
1177 * On success, it returns the number of blocks mapped or allocated.
1178 * If create==0 and the blocks are pre-allocated and uninitialized,
1179 * the result buffer head is unmapped. If create==1, it will make sure
1180 * the buffer head is mapped.
1181 *
1182 * It returns 0 if a plain look up failed (blocks have not been allocated);
1183 * in that case, the buffer head is unmapped.
1184 *
1185 * It returns the error in case of allocation failure.
1186 */
1187 int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
1188 unsigned int max_blocks, struct buffer_head *bh,
1189 int flags)
1190 {
1191 int retval;
1192
1193 clear_buffer_mapped(bh);
1194 clear_buffer_unwritten(bh);
1195
1196 ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u,"
1197 "logical block %lu\n", inode->i_ino, flags, max_blocks,
1198 (unsigned long)block);
1199 /*
1200 * Try to see if we can get the block without requesting a new
1201 * file system block.
1202 */
1203 down_read((&EXT4_I(inode)->i_data_sem));
1204 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
1205 retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
1206 bh, 0);
1207 } else {
1208 retval = ext4_ind_get_blocks(handle, inode, block, max_blocks,
1209 bh, 0);
1210 }
1211 up_read((&EXT4_I(inode)->i_data_sem));
1212
1213 if (retval > 0 && buffer_mapped(bh)) {
1214 int ret = check_block_validity(inode, "file system corruption",
1215 block, bh->b_blocknr, retval);
1216 if (ret != 0)
1217 return ret;
1218 }
1219
1220 /* If it is only a block(s) lookup */
1221 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
1222 return retval;
1223
1224 /*
1225 * Returns if the blocks have already been allocated.
1226 *
1227 * Note that if blocks have been preallocated,
1228 * ext4_ext_get_blocks() returns with create == 0
1229 * and the buffer head unmapped.
1230 */
1231 if (retval > 0 && buffer_mapped(bh))
1232 return retval;
1233
1234 /*
1235 * When we call get_blocks without the create flag, the
1236 * BH_Unwritten flag could have gotten set if the blocks
1237 * requested were part of an uninitialized extent. We need to
1238 * clear this flag now that we are committed to convert all or
1239 * part of the uninitialized extent to be an initialized
1240 * extent. This is because we need to avoid the combination
1241 * of BH_Unwritten and BH_Mapped flags being simultaneously
1242 * set on the buffer_head.
1243 */
1244 clear_buffer_unwritten(bh);
1245
1246 /*
1247 * New block allocation and/or writing to an uninitialized extent
1248 * will possibly result in updating i_data, so we take
1249 * the write lock of i_data_sem and call get_blocks()
1250 * with the create == 1 flag.
1251 */
1252 down_write((&EXT4_I(inode)->i_data_sem));
1253
1254 /*
1255 * If the caller is from the delayed allocation writeout path,
1256 * we have already reserved fs blocks for allocation;
1257 * let the underlying get_block() function know to
1258 * avoid double accounting.
1259 */
1260 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1261 EXT4_I(inode)->i_delalloc_reserved_flag = 1;
1262 /*
1263 * We need to check for EXT4 here because migrate
1264 * could have changed the inode type in between
1265 */
1266 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
1267 retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
1268 bh, flags);
1269 } else {
1270 retval = ext4_ind_get_blocks(handle, inode, block,
1271 max_blocks, bh, flags);
1272
1273 if (retval > 0 && buffer_new(bh)) {
1274 /*
1275 * We allocated new blocks which will result in
1276 * i_data's format changing. Force the migrate
1277 * to fail by clearing migrate flags
1278 */
1279 EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
1280 }
1281 }
1282
1283 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1284 EXT4_I(inode)->i_delalloc_reserved_flag = 0;
1285
1286 /*
1287 * Update reserved blocks/metadata blocks after successful
1288 * block allocation which had been deferred till now.
1289 */
1290 if ((retval > 0) && (flags & EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE))
1291 ext4_da_update_reserve_space(inode, retval);
1292
1293 up_write((&EXT4_I(inode)->i_data_sem));
1294 if (retval > 0 && buffer_mapped(bh)) {
1295 int ret = check_block_validity(inode, "file system "
1296 "corruption after allocation",
1297 block, bh->b_blocknr, retval);
1298 if (ret != 0)
1299 return ret;
1300 }
1301 return retval;
1302 }
1303
1304 /* Maximum number of blocks we map for direct IO at once. */
1305 #define DIO_MAX_BLOCKS 4096
1306
1307 int ext4_get_block(struct inode *inode, sector_t iblock,
1308 struct buffer_head *bh_result, int create)
1309 {
1310 handle_t *handle = ext4_journal_current_handle();
1311 int ret = 0, started = 0;
1312 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
1313 int dio_credits;
1314
1315 if (create && !handle) {
1316 /* Direct IO write... */
1317 if (max_blocks > DIO_MAX_BLOCKS)
1318 max_blocks = DIO_MAX_BLOCKS;
1319 dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
1320 handle = ext4_journal_start(inode, dio_credits);
1321 if (IS_ERR(handle)) {
1322 ret = PTR_ERR(handle);
1323 goto out;
1324 }
1325 started = 1;
1326 }
1327
1328 ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
1329 create ? EXT4_GET_BLOCKS_CREATE : 0);
1330 if (ret > 0) {
1331 bh_result->b_size = (ret << inode->i_blkbits);
1332 ret = 0;
1333 }
1334 if (started)
1335 ext4_journal_stop(handle);
1336 out:
1337 return ret;
1338 }
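
/*
 * A minimal sketch of the get_block_t contract implemented above
 * (illustrative, lookup-only use): the caller sizes bh_result->b_size
 * to the span it wants mapped, and on return b_blocknr and b_size
 * describe what was actually mapped:
 *
 *	struct buffer_head bh = { .b_size = 16 << inode->i_blkbits };
 *
 *	if (!ext4_get_block(inode, iblock, &bh, 0) &&
 *	    buffer_mapped(&bh))
 *		first_phys = bh.b_blocknr;	(mapped span is
 *						 bh.b_size bytes)
 */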
1339
1340 /*
1341 * `handle' can be NULL if create is zero
1342 */
1343 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
1344 ext4_lblk_t block, int create, int *errp)
1345 {
1346 struct buffer_head dummy;
1347 int fatal = 0, err;
1348 int flags = 0;
1349
1350 J_ASSERT(handle != NULL || create == 0);
1351
1352 dummy.b_state = 0;
1353 dummy.b_blocknr = -1000;
1354 buffer_trace_init(&dummy.b_history);
1355 if (create)
1356 flags |= EXT4_GET_BLOCKS_CREATE;
1357 err = ext4_get_blocks(handle, inode, block, 1, &dummy, flags);
1358 /*
1359 * ext4_get_blocks() returns the number of blocks mapped, or 0 in
1360 * the case of a hole.
1361 */
1362 if (err > 0) {
1363 if (err > 1)
1364 WARN_ON(1);
1365 err = 0;
1366 }
1367 *errp = err;
1368 if (!err && buffer_mapped(&dummy)) {
1369 struct buffer_head *bh;
1370 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1371 if (!bh) {
1372 *errp = -EIO;
1373 goto err;
1374 }
1375 if (buffer_new(&dummy)) {
1376 J_ASSERT(create != 0);
1377 J_ASSERT(handle != NULL);
1378
1379 /*
1380 * Now that we do not always journal data, we should
1381 * keep in mind whether this should always journal the
1382 * new buffer as metadata. For now, regular file
1383 * writes use ext4_get_block instead, so it's not a
1384 * problem.
1385 */
1386 lock_buffer(bh);
1387 BUFFER_TRACE(bh, "call get_create_access");
1388 fatal = ext4_journal_get_create_access(handle, bh);
1389 if (!fatal && !buffer_uptodate(bh)) {
1390 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1391 set_buffer_uptodate(bh);
1392 }
1393 unlock_buffer(bh);
1394 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1395 err = ext4_handle_dirty_metadata(handle, inode, bh);
1396 if (!fatal)
1397 fatal = err;
1398 } else {
1399 BUFFER_TRACE(bh, "not a new buffer");
1400 }
1401 if (fatal) {
1402 *errp = fatal;
1403 brelse(bh);
1404 bh = NULL;
1405 }
1406 return bh;
1407 }
1408 err:
1409 return NULL;
1410 }
1411
1412 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1413 ext4_lblk_t block, int create, int *err)
1414 {
1415 struct buffer_head *bh;
1416
1417 bh = ext4_getblk(handle, inode, block, create, err);
1418 if (!bh)
1419 return bh;
1420 if (buffer_uptodate(bh))
1421 return bh;
1422 ll_rw_block(READ_META, 1, &bh);
1423 wait_on_buffer(bh);
1424 if (buffer_uptodate(bh))
1425 return bh;
1426 put_bh(bh);
1427 *err = -EIO;
1428 return NULL;
1429 }
1430
1431 static int walk_page_buffers(handle_t *handle,
1432 struct buffer_head *head,
1433 unsigned from,
1434 unsigned to,
1435 int *partial,
1436 int (*fn)(handle_t *handle,
1437 struct buffer_head *bh))
1438 {
1439 struct buffer_head *bh;
1440 unsigned block_start, block_end;
1441 unsigned blocksize = head->b_size;
1442 int err, ret = 0;
1443 struct buffer_head *next;
1444
1445 for (bh = head, block_start = 0;
1446 ret == 0 && (bh != head || !block_start);
1447 block_start = block_end, bh = next) {
1448 next = bh->b_this_page;
1449 block_end = block_start + blocksize;
1450 if (block_end <= from || block_start >= to) {
1451 if (partial && !buffer_uptodate(bh))
1452 *partial = 1;
1453 continue;
1454 }
1455 err = (*fn)(handle, bh);
1456 if (!ret)
1457 ret = err;
1458 }
1459 return ret;
1460 }
1461
1462 /*
1463 * To preserve ordering, it is essential that the hole instantiation and
1464 * the data write be encapsulated in a single transaction. We cannot
1465 * close off a transaction and start a new one between the ext4_get_block()
1466 * and the commit_write(). So doing the jbd2_journal_start at the start of
1467 * prepare_write() is the right place.
1468 *
1469 * Also, this function can nest inside ext4_writepage() ->
1470 * block_write_full_page(). In that case, we *know* that ext4_writepage()
1471 * has generated enough buffer credits to do the whole page. So we won't
1472 * block on the journal in that case, which is good, because the caller may
1473 * be PF_MEMALLOC.
1474 *
1475 * By accident, ext4 can be reentered when a transaction is open via
1476 * quota file writes. If we were to commit the transaction while thus
1477 * reentered, there can be a deadlock - we would be holding a quota
1478 * lock, and the commit would never complete if another thread had a
1479 * transaction open and was blocking on the quota lock - a ranking
1480 * violation.
1481 *
1482 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1483 * will _not_ run commit under these circumstances because handle->h_ref
1484 * is elevated. We'll still have enough credits for the tiny quotafile
1485 * write.
1486 */
1487 static int do_journal_get_write_access(handle_t *handle,
1488 struct buffer_head *bh)
1489 {
1490 if (!buffer_mapped(bh) || buffer_freed(bh))
1491 return 0;
1492 return ext4_journal_get_write_access(handle, bh);
1493 }
1494
1495 /*
1496 * Truncate blocks that were not used by write. We have to truncate the
1497 * pagecache as well so that corresponding buffers get properly unmapped.
1498 */
1499 static void ext4_truncate_failed_write(struct inode *inode)
1500 {
1501 truncate_inode_pages(inode->i_mapping, inode->i_size);
1502 ext4_truncate(inode);
1503 }
1504
1505 static int ext4_write_begin(struct file *file, struct address_space *mapping,
1506 loff_t pos, unsigned len, unsigned flags,
1507 struct page **pagep, void **fsdata)
1508 {
1509 struct inode *inode = mapping->host;
1510 int ret, needed_blocks;
1511 handle_t *handle;
1512 int retries = 0;
1513 struct page *page;
1514 pgoff_t index;
1515 unsigned from, to;
1516
1517 trace_ext4_write_begin(inode, pos, len, flags);
1518 /*
1519 * Reserve one block more for addition to orphan list in case
1520 * we allocate blocks but the write fails for some reason
1521 */
1522 needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
1523 index = pos >> PAGE_CACHE_SHIFT;
1524 from = pos & (PAGE_CACHE_SIZE - 1);
1525 to = from + len;
1526
1527 retry:
1528 handle = ext4_journal_start(inode, needed_blocks);
1529 if (IS_ERR(handle)) {
1530 ret = PTR_ERR(handle);
1531 goto out;
1532 }
1533
1534 /* We cannot recurse into the filesystem as the transaction is already
1535 * started */
1536 flags |= AOP_FLAG_NOFS;
1537
1538 page = grab_cache_page_write_begin(mapping, index, flags);
1539 if (!page) {
1540 ext4_journal_stop(handle);
1541 ret = -ENOMEM;
1542 goto out;
1543 }
1544 *pagep = page;
1545
1546 ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
1547 ext4_get_block);
1548
1549 if (!ret && ext4_should_journal_data(inode)) {
1550 ret = walk_page_buffers(handle, page_buffers(page),
1551 from, to, NULL, do_journal_get_write_access);
1552 }
1553
1554 if (ret) {
1555 unlock_page(page);
1556 page_cache_release(page);
1557 /*
1558 * block_write_begin may have instantiated a few blocks
1559 * outside i_size. Trim these off again. Don't need
1560 * i_size_read because we hold i_mutex.
1561 *
1562 * Add inode to orphan list in case we crash before
1563 * truncate finishes
1564 */
1565 if (pos + len > inode->i_size && ext4_can_truncate(inode))
1566 ext4_orphan_add(handle, inode);
1567
1568 ext4_journal_stop(handle);
1569 if (pos + len > inode->i_size) {
1570 ext4_truncate_failed_write(inode);
1571 /*
1572 * If truncate failed early the inode might
1573 * still be on the orphan list; we need to
1574 * make sure the inode is removed from the
1575 * orphan list in that case.
1576 */
1577 if (inode->i_nlink)
1578 ext4_orphan_del(NULL, inode);
1579 }
1580 }
1581
1582 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1583 goto retry;
1584 out:
1585 return ret;
1586 }
1587
1588 /* For write_end() in data=journal mode */
1589 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1590 {
1591 if (!buffer_mapped(bh) || buffer_freed(bh))
1592 return 0;
1593 set_buffer_uptodate(bh);
1594 return ext4_handle_dirty_metadata(handle, NULL, bh);
1595 }
1596
1597 static int ext4_generic_write_end(struct file *file,
1598 struct address_space *mapping,
1599 loff_t pos, unsigned len, unsigned copied,
1600 struct page *page, void *fsdata)
1601 {
1602 int i_size_changed = 0;
1603 struct inode *inode = mapping->host;
1604 handle_t *handle = ext4_journal_current_handle();
1605
1606 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1607
1608 /*
1609 * No need to use i_size_read() here, the i_size
1610 * cannot change under us because we hold i_mutex.
1611 *
1612 * But it's important to update i_size while still holding page lock:
1613 * page writeout could otherwise come in and zero beyond i_size.
1614 */
1615 if (pos + copied > inode->i_size) {
1616 i_size_write(inode, pos + copied);
1617 i_size_changed = 1;
1618 }
1619
1620 if (pos + copied > EXT4_I(inode)->i_disksize) {
1621 /* We need to mark the inode dirty even if
1622 * new_i_size is less than inode->i_size
1623 * but greater than i_disksize (hint: delalloc).
1624 */
1625 ext4_update_i_disksize(inode, (pos + copied));
1626 i_size_changed = 1;
1627 }
1628 unlock_page(page);
1629 page_cache_release(page);
1630
1631 /*
1632 * Don't mark the inode dirty under page lock. First, it unnecessarily
1633 * makes the holding time of page lock longer. Second, it forces lock
1634 * ordering of page lock and transaction start for journaling
1635 * filesystems.
1636 */
1637 if (i_size_changed)
1638 ext4_mark_inode_dirty(handle, inode);
1639
1640 return copied;
1641 }
1642
1643 /*
1644 * We need to pick up the new inode size which generic_commit_write gave us.
1645 * `file' can be NULL - e.g., when called from page_symlink().
1646 *
1647 * ext4 never places buffers on inode->i_mapping->private_list. metadata
1648 * buffers are managed internally.
1649 */
1650 static int ext4_ordered_write_end(struct file *file,
1651 struct address_space *mapping,
1652 loff_t pos, unsigned len, unsigned copied,
1653 struct page *page, void *fsdata)
1654 {
1655 handle_t *handle = ext4_journal_current_handle();
1656 struct inode *inode = mapping->host;
1657 int ret = 0, ret2;
1658
1659 trace_ext4_ordered_write_end(inode, pos, len, copied);
1660 ret = ext4_jbd2_file_inode(handle, inode);
1661
1662 if (ret == 0) {
1663 ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1664 page, fsdata);
1665 copied = ret2;
1666 if (pos + len > inode->i_size && ext4_can_truncate(inode))
1667 /* If we have allocated more blocks than we
1668 * copied, we will have blocks allocated outside
1669 * inode->i_size, so truncate them.
1670 */
1671 ext4_orphan_add(handle, inode);
1672 if (ret2 < 0)
1673 ret = ret2;
1674 }
1675 ret2 = ext4_journal_stop(handle);
1676 if (!ret)
1677 ret = ret2;
1678
1679 if (pos + len > inode->i_size) {
1680 ext4_truncate_failed_write(inode);
1681 /*
1682 * If truncate failed early the inode might still be
1683 * on the orphan list; we need to make sure the inode
1684 * is removed from the orphan list in that case.
1685 */
1686 if (inode->i_nlink)
1687 ext4_orphan_del(NULL, inode);
1688 }
1689
1690
1691 return ret ? ret : copied;
1692 }
1693
1694 static int ext4_writeback_write_end(struct file *file,
1695 struct address_space *mapping,
1696 loff_t pos, unsigned len, unsigned copied,
1697 struct page *page, void *fsdata)
1698 {
1699 handle_t *handle = ext4_journal_current_handle();
1700 struct inode *inode = mapping->host;
1701 int ret = 0, ret2;
1702
1703 trace_ext4_writeback_write_end(inode, pos, len, copied);
1704 ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1705 page, fsdata);
1706 copied = ret2;
1707 if (pos + len > inode->i_size && ext4_can_truncate(inode))
1708 /* If we have allocated more blocks than we
1709 * copied, we will have blocks allocated outside
1710 * inode->i_size, so truncate them.
1711 */
1712 ext4_orphan_add(handle, inode);
1713
1714 if (ret2 < 0)
1715 ret = ret2;
1716
1717 ret2 = ext4_journal_stop(handle);
1718 if (!ret)
1719 ret = ret2;
1720
1721 if (pos + len > inode->i_size) {
1722 ext4_truncate_failed_write(inode);
1723 /*
1724 * If truncate failed early the inode might still be
1725 * on the orphan list; we need to make sure the inode
1726 * is removed from the orphan list in that case.
1727 */
1728 if (inode->i_nlink)
1729 ext4_orphan_del(NULL, inode);
1730 }
1731
1732 return ret ? ret : copied;
1733 }
1734
1735 static int ext4_journalled_write_end(struct file *file,
1736 struct address_space *mapping,
1737 loff_t pos, unsigned len, unsigned copied,
1738 struct page *page, void *fsdata)
1739 {
1740 handle_t *handle = ext4_journal_current_handle();
1741 struct inode *inode = mapping->host;
1742 int ret = 0, ret2;
1743 int partial = 0;
1744 unsigned from, to;
1745 loff_t new_i_size;
1746
1747 trace_ext4_journalled_write_end(inode, pos, len, copied);
1748 from = pos & (PAGE_CACHE_SIZE - 1);
1749 to = from + len;
1750
1751 if (copied < len) {
1752 if (!PageUptodate(page))
1753 copied = 0;
1754 page_zero_new_buffers(page, from+copied, to);
1755 }
1756
1757 ret = walk_page_buffers(handle, page_buffers(page), from,
1758 to, &partial, write_end_fn);
1759 if (!partial)
1760 SetPageUptodate(page);
1761 new_i_size = pos + copied;
1762 if (new_i_size > inode->i_size)
1763 i_size_write(inode, pos+copied);
1764 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1765 if (new_i_size > EXT4_I(inode)->i_disksize) {
1766 ext4_update_i_disksize(inode, new_i_size);
1767 ret2 = ext4_mark_inode_dirty(handle, inode);
1768 if (!ret)
1769 ret = ret2;
1770 }
1771
1772 unlock_page(page);
1773 page_cache_release(page);
1774 if (pos + len > inode->i_size && ext4_can_truncate(inode))
1775 /* If we have allocated more blocks than we
1776 * copied, we will have blocks allocated outside
1777 * inode->i_size, so truncate them.
1778 */
1779 ext4_orphan_add(handle, inode);
1780
1781 ret2 = ext4_journal_stop(handle);
1782 if (!ret)
1783 ret = ret2;
1784 if (pos + len > inode->i_size) {
1785 ext4_truncate_failed_write(inode);
1786 /*
1787 * If truncate failed early the inode might still be
1788 * on the orphan list; we need to make sure the inode
1789 * is removed from the orphan list in that case.
1790 */
1791 if (inode->i_nlink)
1792 ext4_orphan_del(NULL, inode);
1793 }
1794
1795 return ret ? ret : copied;
1796 }
1797
1798 static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
1799 {
1800 int retries = 0;
1801 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1802 unsigned long md_needed, mdblocks, total = 0;
1803
1804 /*
1805 * Recalculate the amount of metadata blocks to reserve
1806 * in order to allocate nrblocks;
1807 * the worst case is one extent per block.
1808 */
1809 repeat:
1810 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1811 total = EXT4_I(inode)->i_reserved_data_blocks + nrblocks;
1812 mdblocks = ext4_calc_metadata_amount(inode, total);
1813 BUG_ON(mdblocks < EXT4_I(inode)->i_reserved_meta_blocks);
1814
1815 md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
1816 total = md_needed + nrblocks;
1817
1818 /*
1819 * Make quota reservation here to prevent quota overflow
1820 * later. Real quota accounting is done at pages writeout
1821 * time.
1822 */
1823 if (vfs_dq_reserve_block(inode, total)) {
1824 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1825 return -EDQUOT;
1826 }
1827
1828 if (ext4_claim_free_blocks(sbi, total)) {
1829 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1830 vfs_dq_release_reservation_block(inode, total);
1831 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1832 yield();
1833 goto repeat;
1834 }
1835 return -ENOSPC;
1836 }
1837 EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
1838 EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;
1839
1840 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1841 return 0; /* success */
1842 }
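/*
 * Worked example (hypothetical values, added for illustration): assuming
 * i_reserved_data_blocks = 10, i_reserved_meta_blocks = 2, and
 * ext4_calc_metadata_amount(inode, 11) = 3 for a request of nrblocks = 1,
 * the arithmetic above works out to:
 *
 *	total     = 10 + 1 = 11	(data blocks after this request)
 *	mdblocks  = 3		(worst-case metadata for 11 data blocks)
 *	md_needed = 3 - 2  = 1	(extra metadata blocks to reserve now)
 *	total     = 1 + 1  = 2	(blocks charged against quota and the
 *				 dirty-blocks counter for this request)
 */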
1843
1844 static void ext4_da_release_space(struct inode *inode, int to_free)
1845 {
1846 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1847 int total, mdb, mdb_free, release;
1848
1849 if (!to_free)
1850 return; /* Nothing to release, exit */
1851
1852 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1853
1854 if (!EXT4_I(inode)->i_reserved_data_blocks) {
1855 /*
1856 * If there are no reserved blocks but we try to free some,
1857 * then the counter is messed up somewhere.
1858 * But since this function is called from the invalidate-
1859 * page path, it's harmless to return without any action.
1860 */
1861 printk(KERN_INFO "ext4 delalloc tried to release %d reserved "
1862 "blocks for inode %lu, but there are no reserved "
1863 "data blocks\n", to_free, inode->i_ino);
1864 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1865 return;
1866 }
1867
1868 /* recalculate the number of metablocks that still need to be reserved */
1869 total = EXT4_I(inode)->i_reserved_data_blocks - to_free;
1870 mdb = ext4_calc_metadata_amount(inode, total);
1871
1872 /* figure out how many metablocks to release */
1873 BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
1874 mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
1875
1876 release = to_free + mdb_free;
1877
1878 /* update fs dirty blocks counter for truncate case */
1879 percpu_counter_sub(&sbi->s_dirtyblocks_counter, release);
1880
1881 /* update per-inode reservations */
1882 BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks);
1883 EXT4_I(inode)->i_reserved_data_blocks -= to_free;
1884
1885 BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
1886 EXT4_I(inode)->i_reserved_meta_blocks = mdb;
1887 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1888
1889 vfs_dq_release_reservation_block(inode, release);
1890 }
1891
1892 static void ext4_da_page_release_reservation(struct page *page,
1893 unsigned long offset)
1894 {
1895 int to_release = 0;
1896 struct buffer_head *head, *bh;
1897 unsigned int curr_off = 0;
1898
1899 head = page_buffers(page);
1900 bh = head;
1901 do {
1902 unsigned int next_off = curr_off + bh->b_size;
1903
1904 if ((offset <= curr_off) && (buffer_delay(bh))) {
1905 to_release++;
1906 clear_buffer_delay(bh);
1907 }
1908 curr_off = next_off;
1909 } while ((bh = bh->b_this_page) != head);
1910 ext4_da_release_space(page->mapping->host, to_release);
1911 }
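/*
 * Worked example (hypothetical values, added for illustration): with an
 * assumed 1K block size (four buffers per 4K page) and offset = 2048, the
 * walk above leaves the buffers at page offsets 0 and 1024 alone; for the
 * buffers at offsets 2048 and 3072 it counts and clears each one that still
 * has its delay bit set, so a partial-page invalidate releases only the
 * reservations of the truncated tail.
 */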
1912
1913 /*
1914 * Delayed allocation stuff
1915 */
1916
1917 /*
1918 * mpage_da_submit_io - walks through the extent of pages and tries to write
1919 * them with the writepage() callback
1920 *
1921 * @mpd->inode: inode
1922 * @mpd->first_page: first page of the extent
1923 * @mpd->next_page: page after the last page of the extent
1924 *
1925 * By the time mpage_da_submit_io() is called we expect all blocks
1926 * to be allocated; this may not be true if allocation failed.
1927 *
1928 * As pages are already locked by write_cache_pages(), we can't use it
1929 */
1930 static int mpage_da_submit_io(struct mpage_da_data *mpd)
1931 {
1932 long pages_skipped;
1933 struct pagevec pvec;
1934 unsigned long index, end;
1935 int ret = 0, err, nr_pages, i;
1936 struct inode *inode = mpd->inode;
1937 struct address_space *mapping = inode->i_mapping;
1938
1939 BUG_ON(mpd->next_page <= mpd->first_page);
1940 /*
1941 * We need to start from the first_page to the next_page - 1
1942 * to make sure we also write the mapped dirty buffer_heads.
1943 * If we look at mpd->b_blocknr we would only be looking
1944 * at the currently mapped buffer_heads.
1945 */
1946 index = mpd->first_page;
1947 end = mpd->next_page - 1;
1948
1949 pagevec_init(&pvec, 0);
1950 while (index <= end) {
1951 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1952 if (nr_pages == 0)
1953 break;
1954 for (i = 0; i < nr_pages; i++) {
1955 struct page *page = pvec.pages[i];
1956
1957 index = page->index;
1958 if (index > end)
1959 break;
1960 index++;
1961
1962 BUG_ON(!PageLocked(page));
1963 BUG_ON(PageWriteback(page));
1964
1965 pages_skipped = mpd->wbc->pages_skipped;
1966 err = mapping->a_ops->writepage(page, mpd->wbc);
1967 if (!err && (pages_skipped == mpd->wbc->pages_skipped))
1968 /*
1969 * we have successfully written the page
1970 * without skipping it
1971 */
1972 mpd->pages_written++;
1973 /*
1974 * In error case, we have to continue because
1975 * remaining pages are still locked
1976 * XXX: unlock and re-dirty them?
1977 */
1978 if (ret == 0)
1979 ret = err;
1980 }
1981 pagevec_release(&pvec);
1982 }
1983 return ret;
1984 }
1985
1986 /*
1987 * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers
1988 *
1989 * @mpd->inode - inode to walk through
1990 * @exbh->b_blocknr - first block on a disk
1991 * @exbh->b_size - amount of space in bytes
1992 * @logical - first logical block to start assignment with
1993 *
1994 * the function goes through all the passed space and puts actual disk
1995 * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten
1996 */
1997 static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
1998 struct buffer_head *exbh)
1999 {
2000 struct inode *inode = mpd->inode;
2001 struct address_space *mapping = inode->i_mapping;
2002 int blocks = exbh->b_size >> inode->i_blkbits;
2003 sector_t pblock = exbh->b_blocknr, cur_logical;
2004 struct buffer_head *head, *bh;
2005 pgoff_t index, end;
2006 struct pagevec pvec;
2007 int nr_pages, i;
2008
2009 index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
2010 end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
2011 cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2012
2013 pagevec_init(&pvec, 0);
2014
2015 while (index <= end) {
2016 /* XXX: optimize tail */
2017 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
2018 if (nr_pages == 0)
2019 break;
2020 for (i = 0; i < nr_pages; i++) {
2021 struct page *page = pvec.pages[i];
2022
2023 index = page->index;
2024 if (index > end)
2025 break;
2026 index++;
2027
2028 BUG_ON(!PageLocked(page));
2029 BUG_ON(PageWriteback(page));
2030 BUG_ON(!page_has_buffers(page));
2031
2032 bh = page_buffers(page);
2033 head = bh;
2034
2035 /* skip blocks out of the range */
2036 do {
2037 if (cur_logical >= logical)
2038 break;
2039 cur_logical++;
2040 } while ((bh = bh->b_this_page) != head);
2041
2042 do {
2043 if (cur_logical >= logical + blocks)
2044 break;
2045
2046 if (buffer_delay(bh) ||
2047 buffer_unwritten(bh)) {
2048
2049 BUG_ON(bh->b_bdev != inode->i_sb->s_bdev);
2050
2051 if (buffer_delay(bh)) {
2052 clear_buffer_delay(bh);
2053 bh->b_blocknr = pblock;
2054 } else {
2055 /*
2056 * an unwritten buffer should already have a
2057 * blocknr assigned; verify that
2058 */
2059 clear_buffer_unwritten(bh);
2060 BUG_ON(bh->b_blocknr != pblock);
2061 }
2062
2063 } else if (buffer_mapped(bh))
2064 BUG_ON(bh->b_blocknr != pblock);
2065
2066 cur_logical++;
2067 pblock++;
2068 } while ((bh = bh->b_this_page) != head);
2069 }
2070 pagevec_release(&pvec);
2071 }
2072 }
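/*
 * Worked example (hypothetical values, added for illustration): assuming
 * exbh->b_blocknr = 5000 and exbh->b_size covering 8 blocks with
 * logical = 120, the walk above assigns pblock 5000..5007 to the
 * buffer_heads for logical blocks 120..127, clearing BH_Delay on delayed
 * buffers and BH_Unwritten (after verifying b_blocknr already matches)
 * on unwritten ones.
 */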
2073
2074
2075 /*
2076 * __unmap_underlying_blocks - just a helper function to unmap
2077 * set of blocks described by @bh
2078 */
2079 static inline void __unmap_underlying_blocks(struct inode *inode,
2080 struct buffer_head *bh)
2081 {
2082 struct block_device *bdev = inode->i_sb->s_bdev;
2083 int blocks, i;
2084
2085 blocks = bh->b_size >> inode->i_blkbits;
2086 for (i = 0; i < blocks; i++)
2087 unmap_underlying_metadata(bdev, bh->b_blocknr + i);
2088 }
2089
2090 static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
2091 sector_t logical, long blk_cnt)
2092 {
2093 int nr_pages, i;
2094 pgoff_t index, end;
2095 struct pagevec pvec;
2096 struct inode *inode = mpd->inode;
2097 struct address_space *mapping = inode->i_mapping;
2098
2099 index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
2100 end = (logical + blk_cnt - 1) >>
2101 (PAGE_CACHE_SHIFT - inode->i_blkbits);
pagevec_init(&pvec, 0);	/* added: pvec was previously used uninitialized */
2102 while (index <= end) {
2103 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
2104 if (nr_pages == 0)
2105 break;
2106 for (i = 0; i < nr_pages; i++) {
2107 struct page *page = pvec.pages[i];
2108 index = page->index;
2109 if (index > end)
2110 break;
2111 index++;
2112
2113 BUG_ON(!PageLocked(page));
2114 BUG_ON(PageWriteback(page));
2115 block_invalidatepage(page, 0);
2116 ClearPageUptodate(page);
2117 unlock_page(page);
2118 }
/* added: drop the page references taken by pagevec_lookup() above */
pagevec_release(&pvec);
2119 }
2120 return;
2121 }
2122
2123 static void ext4_print_free_blocks(struct inode *inode)
2124 {
2125 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2126 printk(KERN_CRIT "Total free blocks count %lld\n",
2127 ext4_count_free_blocks(inode->i_sb));
2128 printk(KERN_CRIT "Free/Dirty block details\n");
2129 printk(KERN_CRIT "free_blocks=%lld\n",
2130 (long long) percpu_counter_sum(&sbi->s_freeblocks_counter));
2131 printk(KERN_CRIT "dirty_blocks=%lld\n",
2132 (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter));
2133 printk(KERN_CRIT "Block reservation details\n");
2134 printk(KERN_CRIT "i_reserved_data_blocks=%u\n",
2135 EXT4_I(inode)->i_reserved_data_blocks);
2136 printk(KERN_CRIT "i_reserved_meta_blocks=%u\n",
2137 EXT4_I(inode)->i_reserved_meta_blocks);
2138 return;
2139 }
2140
2141 /*
2142 * mpage_da_map_blocks - go through given space
2143 *
2144 * @mpd - bh describing space
2145 *
2146 * The function skips space we know is already mapped to disk blocks.
2147 *
2148 */
2149 static int mpage_da_map_blocks(struct mpage_da_data *mpd)
2150 {
2151 int err, blks, get_blocks_flags;
2152 struct buffer_head new;
2153 sector_t next = mpd->b_blocknr;
2154 unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
2155 loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
2156 handle_t *handle = NULL;
2157
2158 /*
2159 * We consider only non-mapped and non-allocated blocks
2160 */
2161 if ((mpd->b_state & (1 << BH_Mapped)) &&
2162 !(mpd->b_state & (1 << BH_Delay)) &&
2163 !(mpd->b_state & (1 << BH_Unwritten)))
2164 return 0;
2165
2166 /*
2167 * If we didn't accumulate anything to write simply return
2168 */
2169 if (!mpd->b_size)
2170 return 0;
2171
2172 handle = ext4_journal_current_handle();
2173 BUG_ON(!handle);
2174
2175 /*
2176 * Call ext4_get_blocks() to allocate any delayed allocation
2177 * blocks, or to convert an uninitialized extent to be
2178 * initialized (in the case where we have written into
2179 * one or more preallocated blocks).
2180 *
2181 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
2182 * indicate that we are on the delayed allocation path. This
2183 * affects functions in many different parts of the allocation
2184 * call path. This flag exists primarily because we don't
2185 * want to change *many* call functions, so ext4_get_blocks()
2186 * will set the magic i_delalloc_reserved_flag once the
2187 * inode's allocation semaphore is taken.
2188 *
2189 * If the blocks in question were delalloc blocks, set
2190 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
2191 * variables are updated after the blocks have been allocated.
2192 */
2193 new.b_state = 0;
2194 get_blocks_flags = (EXT4_GET_BLOCKS_CREATE |
2195 EXT4_GET_BLOCKS_DELALLOC_RESERVE);
2196 if (mpd->b_state & (1 << BH_Delay))
2197 get_blocks_flags |= EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE;
2198 blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks,
2199 &new, get_blocks_flags);
2200 if (blks < 0) {
2201 err = blks;
2202 /*
2203 * If get_block returns an error we simply
2204 * return. Later writepage will redirty the page and
2205 * writepages will find the dirty page again
2206 */
2207 if (err == -EAGAIN)
2208 return 0;
2209
2210 if (err == -ENOSPC &&
2211 ext4_count_free_blocks(mpd->inode->i_sb)) {
2212 mpd->retval = err;
2213 return 0;
2214 }
2215
2216 /*
2217 * get block failure will cause us to loop in
2218 * writepages, because a_ops->writepage won't be able
2219 * to make progress. The page will be redirtied by
2220 * writepage and writepages will again try to write
2221 * the same.
2222 */
2223 ext4_msg(mpd->inode->i_sb, KERN_CRIT,
2224 "delayed block allocation failed for inode %lu at "
2225 "logical offset %llu with max blocks %zd with "
2226 "error %d\n", mpd->inode->i_ino,
2227 (unsigned long long) next,
2228 mpd->b_size >> mpd->inode->i_blkbits, err);
2229 printk(KERN_CRIT "This should not happen!! "
2230 "Data will be lost\n");
2231 if (err == -ENOSPC) {
2232 ext4_print_free_blocks(mpd->inode);
2233 }
2234 /* invalidate all the pages */
2235 ext4_da_block_invalidatepages(mpd, next,
2236 mpd->b_size >> mpd->inode->i_blkbits);
2237 return err;
2238 }
2239 BUG_ON(blks == 0);
2240
2241 new.b_size = (blks << mpd->inode->i_blkbits);
2242
2243 if (buffer_new(&new))
2244 __unmap_underlying_blocks(mpd->inode, &new);
2245
2246 /*
2247 * If blocks are delayed marked, we need to
2248 * put actual blocknr and drop delayed bit
2249 */
2250 if ((mpd->b_state & (1 << BH_Delay)) ||
2251 (mpd->b_state & (1 << BH_Unwritten)))
2252 mpage_put_bnr_to_bhs(mpd, next, &new);
2253
2254 if (ext4_should_order_data(mpd->inode)) {
2255 err = ext4_jbd2_file_inode(handle, mpd->inode);
2256 if (err)
2257 return err;
2258 }
2259
2260 /*
2261 * Update on-disk size along with block allocation.
2262 */
2263 disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
2264 if (disksize > i_size_read(mpd->inode))
2265 disksize = i_size_read(mpd->inode);
2266 if (disksize > EXT4_I(mpd->inode)->i_disksize) {
2267 ext4_update_i_disksize(mpd->inode, disksize);
2268 return ext4_mark_inode_dirty(handle, mpd->inode);
2269 }
2270
2271 return 0;
2272 }
2273
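/*
 * Added note: BH_FLAGS is the mask of buffer state bits that
 * mpage_add_bh_to_extent() below compares when deciding whether a new
 * buffer_head can be merged into the extent being collected.
 */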
2274 #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
2275 (1 << BH_Delay) | (1 << BH_Unwritten))
2276
2277 /*
2278 * mpage_add_bh_to_extent - try to add one more block to extent of blocks
2279 *
2280 * @mpd->lbh - extent of blocks
2281 * @logical - logical number of the block in the file
2282 * @bh - bh of the block (used to access block's state)
2283 *
2284 * the function is used to collect contiguous blocks in the same state
2285 */
2286 static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
2287 sector_t logical, size_t b_size,
2288 unsigned long b_state)
2289 {
2290 sector_t next;
2291 int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
2292
2293 /* check if the reserved journal credits might overflow */
2294 if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) {
2295 if (nrblocks >= EXT4_MAX_TRANS_DATA) {
2296 /*
2297 * With non-extent format we are limited by the journal
2298 * credit available. Total credit needed to insert
2299 * nrblocks contiguous blocks is dependent on the
2300 * nrblocks. So limit nrblocks.
2301 */
2302 goto flush_it;
2303 } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
2304 EXT4_MAX_TRANS_DATA) {
2305 /*
2306 * Adding the new buffer_head would make it cross the
2307 * allowed limit for which we have journal credit
2308 * reserved. So limit the new bh->b_size
2309 */
2310 b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
2311 mpd->inode->i_blkbits;
2312 /* we will do mpage_da_submit_io in the next loop */
2313 }
2314 }
2315 /*
2316 * First block in the extent
2317 */
2318 if (mpd->b_size == 0) {
2319 mpd->b_blocknr = logical;
2320 mpd->b_size = b_size;
2321 mpd->b_state = b_state & BH_FLAGS;
2322 return;
2323 }
2324
2325 next = mpd->b_blocknr + nrblocks;
2326 /*
2327 * Can we merge the block to our big extent?
2328 */
2329 if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
2330 mpd->b_size += b_size;
2331 return;
2332 }
2333
2334 flush_it:
2335 /*
2336 * We couldn't merge the block to our extent, so we
2337 * need to flush current extent and start new one
2338 */
2339 if (mpage_da_map_blocks(mpd) == 0)
2340 mpage_da_submit_io(mpd);
2341 mpd->io_done = 1;
2342 return;
2343 }
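/*
 * Worked example (hypothetical values, added for illustration): if mpd
 * currently describes logical blocks 100..103 (b_blocknr = 100, b_size of
 * 4 blocks), a new dirty delayed buffer at logical 104 with the same
 * BH_FLAGS state is merged by growing b_size to 5 blocks; a buffer at
 * logical 105, or one at 104 with a different state, instead takes the
 * flush_it path, which maps and submits the current extent and sets
 * io_done.
 */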
2344
2345 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
2346 {
2347 return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
2348 }
2349
2350 /*
2351 * __mpage_da_writepage - finds extent of pages and blocks
2352 *
2353 * @page: page to consider
2354 * @wbc: not used, we just follow rules
2355 * @data: context
2356 *
2357 * The function finds extents of pages and scans them for all blocks.
2358 */
2359 static int __mpage_da_writepage(struct page *page,
2360 struct writeback_control *wbc, void *data)
2361 {
2362 struct mpage_da_data *mpd = data;
2363 struct inode *inode = mpd->inode;
2364 struct buffer_head *bh, *head;
2365 sector_t logical;
2366
2367 if (mpd->io_done) {
2368 /*
2369 * Redirty the rest of the pages in the
2370 * page_vec and skip them. We will
2371 * try to write them again after
2372 * starting a new transaction.
2373 */
2374 redirty_page_for_writepage(wbc, page);
2375 unlock_page(page);
2376 return MPAGE_DA_EXTENT_TAIL;
2377 }
2378 /*
2379 * Can we merge this page to current extent?
2380 */
2381 if (mpd->next_page != page->index) {
2382 /*
2383 * Nope, we can't. So, we map non-allocated blocks
2384 * and start IO on them using writepage()
2385 */
2386 if (mpd->next_page != mpd->first_page) {
2387 if (mpage_da_map_blocks(mpd) == 0)
2388 mpage_da_submit_io(mpd);
2389 /*
2390 * skip rest of the page in the page_vec
2391 */
2392 mpd->io_done = 1;
2393 redirty_page_for_writepage(wbc, page);
2394 unlock_page(page);
2395 return MPAGE_DA_EXTENT_TAIL;
2396 }
2397
2398 /*
2399 * Start next extent of pages ...
2400 */
2401 mpd->first_page = page->index;
2402
2403 /*
2404 * ... and blocks
2405 */
2406 mpd->b_size = 0;
2407 mpd->b_state = 0;
2408 mpd->b_blocknr = 0;
2409 }
2410
2411 mpd->next_page = page->index + 1;
2412 logical = (sector_t) page->index <<
2413 (PAGE_CACHE_SHIFT - inode->i_blkbits);
2414
2415 if (!page_has_buffers(page)) {
2416 mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
2417 (1 << BH_Dirty) | (1 << BH_Uptodate));
2418 if (mpd->io_done)
2419 return MPAGE_DA_EXTENT_TAIL;
2420 } else {
2421 /*
2422 * Page with regular buffer heads, just add all dirty ones
2423 */
2424 head = page_buffers(page);
2425 bh = head;
2426 do {
2427 BUG_ON(buffer_locked(bh));
2428 /*
2429 * We need to try to allocate
2430 * unmapped blocks in the same page.
2431 * Otherwise we won't make progress
2432 * with the page in ext4_writepage
2433 */
2434 if (ext4_bh_delay_or_unwritten(NULL, bh)) {
2435 mpage_add_bh_to_extent(mpd, logical,
2436 bh->b_size,
2437 bh->b_state);
2438 if (mpd->io_done)
2439 return MPAGE_DA_EXTENT_TAIL;
2440 } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2441 /*
2442 * mapped dirty buffer. We need to update
2443 * the b_state because we look at
2444 * b_state in mpage_da_map_blocks. We don't
2445 * update b_size because if we find an
2446 * unmapped buffer_head later we need to
2447 * use the b_state flag of that buffer_head.
2448 */
2449 if (mpd->b_size == 0)
2450 mpd->b_state = bh->b_state & BH_FLAGS;
2451 }
2452 logical++;
2453 } while ((bh = bh->b_this_page) != head);
2454 }
2455
2456 return 0;
2457 }
2458
2459 /*
2460 * This is a special get_blocks_t callback which is used by
2461 * ext4_da_write_begin(). It will either return a mapped block or
2462 * reserve space for a single block.
2463 *
2464 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
2465 * We also have b_blocknr = -1 and b_bdev initialized properly
2466 *
2467 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
2468 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
2469 * initialized properly.
2470 */
2471 static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
2472 struct buffer_head *bh_result, int create)
2473 {
2474 int ret = 0;
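	/*
	 * Added note: invalid_block is a block number that should never
	 * correspond to a real on-disk block; delayed buffers are mapped
	 * to it so that b_bdev is valid while b_blocknr is recognizably
	 * fake. On a filesystem large enough to actually contain that
	 * block number, ~0 is used instead.
	 */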
2475 sector_t invalid_block = ~((sector_t) 0xffff);
2476
2477 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
2478 invalid_block = ~0;
2479
2480 BUG_ON(create == 0);
2481 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
2482
2483 /*
2484 * first, we need to know whether the block is already allocated;
2485 * preallocated blocks are unmapped but should be treated
2486 * the same as allocated blocks.
2487 */
2488 ret = ext4_get_blocks(NULL, inode, iblock, 1, bh_result, 0);
2489 if ((ret == 0) && !buffer_delay(bh_result)) {
2490 /* the block isn't (pre)allocated yet, let's reserve space */
2491 /*
2492 * XXX: __block_prepare_write() unmaps passed block,
2493 * is it OK?
2494 */
2495 ret = ext4_da_reserve_space(inode, 1);
2496 if (ret)
2497 /* not enough space to reserve */
2498 return ret;
2499
2500 map_bh(bh_result, inode->i_sb, invalid_block);
2501 set_buffer_new(bh_result);
2502 set_buffer_delay(bh_result);
2503 } else if (ret > 0) {
2504 bh_result->b_size = (ret << inode->i_blkbits);
2505 if (buffer_unwritten(bh_result)) {
2506 /* A delayed write to unwritten bh should
2507 * be marked new and mapped. Mapped ensures
2508 * that we don't do get_block multiple times
2509 * when we write to the same offset and new
2510 * ensures that we do proper zero out for
2511 * partial write.
2512 */
2513 set_buffer_new(bh_result);
2514 set_buffer_mapped(bh_result);
2515 }
2516 ret = 0;
2517 }
2518
2519 return ret;
2520 }
2521
2522 /*
2523 * This function is used as a standard get_block_t callback function
2524 * when there is no desire to allocate any blocks. It is used as a
2525 * callback function for block_prepare_write(), nobh_writepage(), and
2526 * block_write_full_page(). These functions should only try to map a
2527 * single block at a time.
2528 *
2529 * Since this function doesn't do block allocations even if the caller
2530 * requests it by passing in create=1, it is critically important that
2531 * any caller checks to make sure that any buffer heads returned
2532 * by this function are either all already mapped or marked for
2533 * delayed allocation before calling nobh_writepage() or
2534 * block_write_full_page(). Otherwise, b_blocknr could be left
2535 * uninitialized, and the page write functions will be taken by
2536 * surprise.
2537 */
2538 static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
2539 struct buffer_head *bh_result, int create)
2540 {
2541 int ret = 0;
2542 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
2543
2544 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
2545
2546 /*
2547 * we don't want to do block allocation in writepage
2548 * so call get_block_wrap with create = 0
2549 */
2550 ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0);
2551 if (ret > 0) {
2552 bh_result->b_size = (ret << inode->i_blkbits);
2553 ret = 0;
2554 }
2555 return ret;
2556 }
2557
2558 static int bget_one(handle_t *handle, struct buffer_head *bh)
2559 {
2560 get_bh(bh);
2561 return 0;
2562 }
2563
2564 static int bput_one(handle_t *handle, struct buffer_head *bh)
2565 {
2566 put_bh(bh);
2567 return 0;
2568 }
2569
2570 static int __ext4_journalled_writepage(struct page *page,
2571 unsigned int len)
2572 {
2573 struct address_space *mapping = page->mapping;
2574 struct inode *inode = mapping->host;
2575 struct buffer_head *page_bufs;
2576 handle_t *handle = NULL;
2577 int ret = 0;
2578 int err;
2579
2580 page_bufs = page_buffers(page);
2581 BUG_ON(!page_bufs);
2582 walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
2583 /* As soon as we unlock the page, it can go away, but we have
2584 * references to buffers so we are safe */
2585 unlock_page(page);
2586
2587 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
2588 if (IS_ERR(handle)) {
2589 ret = PTR_ERR(handle);
2590 goto out;
2591 }
2592
2593 ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
2594 do_journal_get_write_access);
2595
2596 err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
2597 write_end_fn);
2598 if (ret == 0)
2599 ret = err;
2600 err = ext4_journal_stop(handle);
2601 if (!ret)
2602 ret = err;
2603
2604 walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
2605 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
2606 out:
2607 return ret;
2608 }
2609
2610 /*
2611 * Note that we don't need to start a transaction unless we're journaling data
2612 * because we should have holes filled from ext4_page_mkwrite(). We don't even
2613 * need to add the inode to the transaction's list in ordered mode because if
2614 * we are writing back data added by write(), the inode is already there, and if
2615 * we are writing back data modified via mmap(), no one guarantees in which
2616 * transaction the data will hit the disk. In case we are journaling data, we
2617 * cannot start a transaction directly because a transaction start ranks above the
2618 * page lock, so we have to do some magic.
2619 *
2620 * This function can get called via...
2621 * - ext4_da_writepages after taking page lock (have journal handle)
2622 * - journal_submit_inode_data_buffers (no journal handle)
2623 * - shrink_page_list via pdflush (no journal handle)
2624 * - grab_page_cache when doing write_begin (have journal handle)
2625 *
2626 * We don't do any block allocation in this function. If we have page with
2627 * multiple blocks we need to write those buffer_heads that are mapped. This
2628 * is important for mmap-based writes. So if, with blocksize 1K, we do
2629 * truncate(f, 1024);
2630 * a = mmap(f, 0, 4096);
2631 * a[0] = 'a';
2632 * truncate(f, 4096);
2633 * we have in the page first buffer_head mapped via page_mkwrite call back
2634 * but other buffer_heads would be unmapped but dirty (dirtied via
2635 * do_wp_page). So writepage should write the first block. If we modify
2636 * the mmap area beyond 1024 we will again get a page_fault and the
2637 * page_mkwrite callback will do the block allocation and mark the
2638 * buffer_heads mapped.
2639 *
2640 * We redirty the page if we have any buffer_heads that are either delayed or
2641 * unwritten in the page.
2642 *
2643 * We can get recursively called as shown below.
2644 *
2645 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
2646 * ext4_writepage()
2647 *
2648 * But since we don't do any block allocation we should not deadlock.
2649 * The page also has the dirty flag cleared, so we don't get a recursive page_lock.
2650 */
2651 static int ext4_writepage(struct page *page,
2652 struct writeback_control *wbc)
2653 {
2654 int ret = 0;
2655 loff_t size;
2656 unsigned int len;
2657 struct buffer_head *page_bufs;
2658 struct inode *inode = page->mapping->host;
2659
2660 trace_ext4_writepage(inode, page);
2661 size = i_size_read(inode);
2662 if (page->index == size >> PAGE_CACHE_SHIFT)
2663 len = size & ~PAGE_CACHE_MASK;
2664 else
2665 len = PAGE_CACHE_SIZE;
2666
2667 if (page_has_buffers(page)) {
2668 page_bufs = page_buffers(page);
2669 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2670 ext4_bh_delay_or_unwritten)) {
2671 /*
2672 * We don't want to do block allocation,
2673 * so redirty the page and return.
2674 * We may reach here when we do a journal commit
2675 * via journal_submit_inode_data_buffers.
2676 * If we don't have a mapped block we just ignore
2677 * them. We can also reach here via shrink_page_list.
2678 */
2679 redirty_page_for_writepage(wbc, page);
2680 unlock_page(page);
2681 return 0;
2682 }
2683 } else {
2684 /*
2685 * The test for page_has_buffers() is subtle:
2686 * We know the page is dirty but it lost buffers. That means
2687 * that at some moment in time after write_begin()/write_end()
2688 * has been called all buffers have been clean and thus they
2689 * must have been written at least once. So they are all
2690 * mapped and we can happily proceed with mapping them
2691 * and writing the page.
2692 *
2693 * Try to initialize the buffer_heads and check whether
2694 * all are mapped and non delay. We don't want to
2695 * do block allocation here.
2696 */
2697 ret = block_prepare_write(page, 0, len,
2698 noalloc_get_block_write);
2699 if (!ret) {
2700 page_bufs = page_buffers(page);
2701 /* check whether all are mapped and non-delay */
2702 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2703 ext4_bh_delay_or_unwritten)) {
2704 redirty_page_for_writepage(wbc, page);
2705 unlock_page(page);
2706 return 0;
2707 }
2708 } else {
2709 /*
2710 * We can't do block allocation here
2711 * so just redirty the page and unlock
2712 * and return
2713 */
2714 redirty_page_for_writepage(wbc, page);
2715 unlock_page(page);
2716 return 0;
2717 }
2718 /* now mark the buffer_heads as dirty and uptodate */
2719 block_commit_write(page, 0, len);
2720 }
2721
2722 if (PageChecked(page) && ext4_should_journal_data(inode)) {
2723 /*
2724 * It's mmapped pagecache. Add buffers and journal it. There
2725 * doesn't seem much point in redirtying the page here.
2726 */
2727 ClearPageChecked(page);
2728 return __ext4_journalled_writepage(page, len);
2729 }
2730
2731 if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
2732 ret = nobh_writepage(page, noalloc_get_block_write, wbc);
2733 else
2734 ret = block_write_full_page(page, noalloc_get_block_write,
2735 wbc);
2736
2737 return ret;
2738 }
2739
2740 /*
2741 * This is called via ext4_da_writepages() to
2742 * calculate the total number of credits to reserve to fit
2743 * a single extent allocation into a single transaction;
2744 * ext4_da_writepages() will loop calling this before
2745 * the block allocation.
2746 */
2747
2748 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2749 {
2750 int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2751
2752 /*
2753 * With the non-extent format the journal credit needed to
2754 * insert nrblocks contiguous blocks depends on the
2755 * number of contiguous blocks. So we will limit the
2756 * number of contiguous blocks to a sane value.
2757 */
2758 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) &&
2759 (max_blocks > EXT4_MAX_TRANS_DATA))
2760 max_blocks = EXT4_MAX_TRANS_DATA;
2761
2762 return ext4_chunk_trans_blocks(inode, max_blocks);
2763 }
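/*
 * Worked example (hypothetical values, added for illustration): for an
 * extent-based inode with i_reserved_data_blocks = 300, the function above
 * asks ext4_chunk_trans_blocks(inode, 300) for the credits needed to
 * allocate one 300-block chunk in a single transaction; for a non-extent
 * inode the request would first be clamped to EXT4_MAX_TRANS_DATA.
 */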
2764
2765 static int ext4_da_writepages(struct address_space *mapping,
2766 struct writeback_control *wbc)
2767 {
2768 pgoff_t index;
2769 int range_whole = 0;
2770 handle_t *handle = NULL;
2771 struct mpage_da_data mpd;
2772 struct inode *inode = mapping->host;
2773 int no_nrwrite_index_update;
2774 int pages_written = 0;
2775 long pages_skipped;
2776 unsigned int max_pages;
2777 int range_cyclic, cycled = 1, io_done = 0;
2778 int needed_blocks, ret = 0;
2779 long desired_nr_to_write, nr_to_writebump = 0;
2780 loff_t range_start = wbc->range_start;
2781 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2782
2783 trace_ext4_da_writepages(inode, wbc);
2784
2785 /*
2786 * No pages to write? This is mainly a kludge to avoid starting
2787 * a transaction for special inodes like the journal inode on last iput()
2788 * because that could violate lock ordering on umount
2789 */
2790 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2791 return 0;
2792
2793 /*
2794 * If the filesystem has aborted, it is read-only, so return
2795 * right away instead of dumping stack traces later on that
2796 * will obscure the real source of the problem. We test
2797 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2798 * the latter could be true if the filesystem is mounted
2799 * read-only, and in that case, ext4_da_writepages should
2800 * *never* be called, so if that ever happens, we would want
2801 * the stack trace.
2802 */
2803 if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
2804 return -EROFS;
2805
2806 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2807 range_whole = 1;
2808
2809 range_cyclic = wbc->range_cyclic;
2810 if (wbc->range_cyclic) {
2811 index = mapping->writeback_index;
2812 if (index)
2813 cycled = 0;
2814 wbc->range_start = index << PAGE_CACHE_SHIFT;
2815 wbc->range_end = LLONG_MAX;
2816 wbc->range_cyclic = 0;
2817 } else
2818 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2819
2820 /*
2821 * This works around two forms of stupidity. The first is in
2822 * the writeback code, which caps the maximum number of pages
2823 * written to be 1024 pages. This is wrong on multiple
2824 * levels; different architectures have a different page size,
2825 * which changes the maximum amount of data which gets
2826 * written. Secondly, 4 megabytes is way too small. XFS
2827 * forces this value to be 16 megabytes by multiplying
2828 * nr_to_write parameter by four, and then relies on its
2829 * allocator to allocate larger extents to make them
2830 * contiguous. Unfortunately this brings us to the second
2831 * stupidity, which is that ext4's mballoc code only allocates
2832 * at most 2048 blocks. So we force contiguous writes up to
2833 * the number of dirty blocks in the inode, or
2834 * sbi->max_writeback_mb_bump whichever is smaller.
2835 */
2836 max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
2837 if (!range_cyclic && range_whole)
2838 desired_nr_to_write = wbc->nr_to_write * 8;
2839 else
2840 desired_nr_to_write = ext4_num_dirty_pages(inode, index,
2841 max_pages);
2842 if (desired_nr_to_write > max_pages)
2843 desired_nr_to_write = max_pages;
2844
2845 if (wbc->nr_to_write < desired_nr_to_write) {
2846 nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
2847 wbc->nr_to_write = desired_nr_to_write;
2848 }
2849
2850 mpd.wbc = wbc;
2851 mpd.inode = mapping->host;
2852
2853 /*
2854 * we don't want write_cache_pages to update
2855 * nr_to_write and writeback_index
2856 */
2857 no_nrwrite_index_update = wbc->no_nrwrite_index_update;
2858 wbc->no_nrwrite_index_update = 1;
2859 pages_skipped = wbc->pages_skipped;
2860
2861 retry:
2862 while (!ret && wbc->nr_to_write > 0) {
2863
2864 /*
2865 * we insert one extent at a time, so we need the
2866 * credits needed for a single extent allocation.
2867 * Journalled mode is currently not supported
2868 * by delalloc.
2869 */
2870 BUG_ON(ext4_should_journal_data(inode));
2871 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2872
2873 /* start a new transaction*/
2874 handle = ext4_journal_start(inode, needed_blocks);
2875 if (IS_ERR(handle)) {
2876 ret = PTR_ERR(handle);
2877 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2878 "%ld pages, ino %lu; err %d\n", __func__,
2879 wbc->nr_to_write, inode->i_ino, ret);
2880 goto out_writepages;
2881 }
2882
2883 /*
2884 * Now call __mpage_da_writepage to find the next
2885 * contiguous region of logical blocks that need
2886 * blocks to be allocated by ext4. We don't actually
2887 * submit the blocks for I/O here, even though
2888 * write_cache_pages thinks it will, and will set the
2889 * pages as clean for write before calling
2890 * __mpage_da_writepage().
2891 */
2892 mpd.b_size = 0;
2893 mpd.b_state = 0;
2894 mpd.b_blocknr = 0;
2895 mpd.first_page = 0;
2896 mpd.next_page = 0;
2897 mpd.io_done = 0;
2898 mpd.pages_written = 0;
2899 mpd.retval = 0;
2900 ret = write_cache_pages(mapping, wbc, __mpage_da_writepage,
2901 &mpd);
2902 /*
2903 * If we have a contiguous extent of pages and we
2904 * haven't done the I/O yet, map the blocks and submit
2905 * them for I/O.
2906 */
2907 if (!mpd.io_done && mpd.next_page != mpd.first_page) {
2908 if (mpage_da_map_blocks(&mpd) == 0)
2909 mpage_da_submit_io(&mpd);
2910 mpd.io_done = 1;
2911 ret = MPAGE_DA_EXTENT_TAIL;
2912 }
2913 trace_ext4_da_write_pages(inode, &mpd);
2914 wbc->nr_to_write -= mpd.pages_written;
2915
2916 ext4_journal_stop(handle);
2917
2918 if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
2919 /* commit the transaction which would
2920 * free blocks released in the transaction
2921 * and try again
2922 */
2923 jbd2_journal_force_commit_nested(sbi->s_journal);
2924 wbc->pages_skipped = pages_skipped;
2925 ret = 0;
2926 } else if (ret == MPAGE_DA_EXTENT_TAIL) {
2927 /*
2928 * got one extent; now try with the
2929 * rest of the pages
2930 */
2931 pages_written += mpd.pages_written;
2932 wbc->pages_skipped = pages_skipped;
2933 ret = 0;
2934 io_done = 1;
2935 } else if (wbc->nr_to_write)
2936 /*
2937 * There is no more writeout needed
2938 * or we requested a nonblocking writeout
2939 * and we found the device congested
2940 */
2941 break;
2942 }
2943 if (!io_done && !cycled) {
2944 cycled = 1;
2945 index = 0;
2946 wbc->range_start = index << PAGE_CACHE_SHIFT;
2947 wbc->range_end = mapping->writeback_index - 1;
2948 goto retry;
2949 }
2950 if (pages_skipped != wbc->pages_skipped)
2951 ext4_msg(inode->i_sb, KERN_CRIT,
2952 "This should not happen leaving %s "
2953 "with nr_to_write = %ld ret = %d\n",
2954 __func__, wbc->nr_to_write, ret);
2955
2956 /* Update index */
2957 index += pages_written;
2958 wbc->range_cyclic = range_cyclic;
2959 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2960 /*
2961 * set the writeback_index so that range_cyclic
2962 * mode will write it back later
2963 */
2964 mapping->writeback_index = index;
2965
2966 out_writepages:
2967 if (!no_nrwrite_index_update)
2968 wbc->no_nrwrite_index_update = 0;
2969 if (wbc->nr_to_write > nr_to_writebump)
2970 wbc->nr_to_write -= nr_to_writebump;
2971 wbc->range_start = range_start;
2972 trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
2973 return ret;
2974 }
2975
2976 #define FALL_BACK_TO_NONDELALLOC 1
2977 static int ext4_nonda_switch(struct super_block *sb)
2978 {
2979 s64 free_blocks, dirty_blocks;
2980 struct ext4_sb_info *sbi = EXT4_SB(sb);
2981
2982 /*
2983 * switch to non-delalloc mode if we are running low
2984 * on free blocks. The free block accounting via percpu
2985 * counters can get slightly wrong with percpu_counter_batch getting
2986 * accumulated on each CPU without updating the global counters.
2987 * Delalloc needs accurate free block accounting, so switch
2988 * to non-delalloc when we are near the error range.
2989 */
2990 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
2991 dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter);
2992 if (2 * free_blocks < 3 * dirty_blocks ||
2993 free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
2994 /*
2995 * free block count is less than 150% of dirty blocks
2996 * or free blocks are less than the watermark
2997 */
2998 return 1;
2999 }
3000 return 0;
3001 }
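/*
 * Worked example (hypothetical values, added for illustration): with
 * counter readings free_blocks = 100 and dirty_blocks = 70, the first test
 * above becomes 2 * 100 < 3 * 70, i.e. 200 < 210, so delalloc is switched
 * off; with free_blocks = 120 the test is 240 < 210, which is false, and
 * the watermark comparison decides instead.
 */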
3002
3003 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
3004 loff_t pos, unsigned len, unsigned flags,
3005 struct page **pagep, void **fsdata)
3006 {
3007 int ret, retries = 0;
3008 struct page *page;
3009 pgoff_t index;
3010 unsigned from, to;
3011 struct inode *inode = mapping->host;
3012 handle_t *handle;
3013
3014 index = pos >> PAGE_CACHE_SHIFT;
3015 from = pos & (PAGE_CACHE_SIZE - 1);
3016 to = from + len;
3017
3018 if (ext4_nonda_switch(inode->i_sb)) {
3019 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
3020 return ext4_write_begin(file, mapping, pos,
3021 len, flags, pagep, fsdata);
3022 }
3023 *fsdata = (void *)0;
3024 trace_ext4_da_write_begin(inode, pos, len, flags);
3025 retry:
3026 /*
3027 * With delayed allocation, we don't log the i_disksize update
3028 * if there is delayed block allocation. But we still need
3029 * to journal the i_disksize update if the write is to the end
3030 * of the file and hits an already mapped buffer.
3031 */
3032 handle = ext4_journal_start(inode, 1);
3033 if (IS_ERR(handle)) {
3034 ret = PTR_ERR(handle);
3035 goto out;
3036 }
3037 /* We cannot recurse into the filesystem as the transaction is already
3038 * started */
3039 flags |= AOP_FLAG_NOFS;
3040
3041 page = grab_cache_page_write_begin(mapping, index, flags);
3042 if (!page) {
3043 ext4_journal_stop(handle);
3044 ret = -ENOMEM;
3045 goto out;
3046 }
3047 *pagep = page;
3048
3049 ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
3050 ext4_da_get_block_prep);
3051 if (ret < 0) {
3052 unlock_page(page);
3053 ext4_journal_stop(handle);
3054 page_cache_release(page);
3055 /*
3056 * block_write_begin may have instantiated a few blocks
3057 * outside i_size. Trim these off again. Don't need
3058 * i_size_read because we hold i_mutex.
3059 */
3060 if (pos + len > inode->i_size)
3061 ext4_truncate_failed_write(inode);
3062 }
3063
3064 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3065 goto retry;
3066 out:
3067 return ret;
3068 }
3069
3070 /*
3071 * Check if we should update i_disksize
3072 * when writing to the end of the file without requiring block allocation
3073 */
3074 static int ext4_da_should_update_i_disksize(struct page *page,
3075 unsigned long offset)
3076 {
3077 struct buffer_head *bh;
3078 struct inode *inode = page->mapping->host;
3079 unsigned int idx;
3080 int i;
3081
3082 bh = page_buffers(page);
3083 idx = offset >> inode->i_blkbits;
3084
3085 for (i = 0; i < idx; i++)
3086 bh = bh->b_this_page;
3087
3088 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
3089 return 0;
3090 return 1;
3091 }
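/*
 * Worked example (hypothetical values, added for illustration): assuming a
 * 1K block size and offset = 2500, idx = 2500 >> 10 = 2, so the loop above
 * walks to the third buffer_head in the page; i_disksize is only updated
 * when that buffer is mapped and neither delayed nor unwritten.
 */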
3092
3093 static int ext4_da_write_end(struct file *file,
3094 struct address_space *mapping,
3095 loff_t pos, unsigned len, unsigned copied,
3096 struct page *page, void *fsdata)
3097 {
3098 struct inode *inode = mapping->host;
3099 int ret = 0, ret2;
3100 handle_t *handle = ext4_journal_current_handle();
3101 loff_t new_i_size;
3102 unsigned long start, end;
3103 int write_mode = (int)(unsigned long)fsdata;
3104
3105 if (write_mode == FALL_BACK_TO_NONDELALLOC) {
3106 if (ext4_should_order_data(inode)) {
3107 return ext4_ordered_write_end(file, mapping, pos,
3108 len, copied, page, fsdata);
3109 } else if (ext4_should_writeback_data(inode)) {
3110 return ext4_writeback_write_end(file, mapping, pos,
3111 len, copied, page, fsdata);
3112 } else {
3113 BUG();
3114 }
3115 }
3116
3117 trace_ext4_da_write_end(inode, pos, len, copied);
3118 start = pos & (PAGE_CACHE_SIZE - 1);
3119 end = start + copied - 1;
3120
3121 /*
3122 * generic_write_end() will run mark_inode_dirty() if i_size
3123 * changes. So let's piggyback the i_disksize mark_inode_dirty
3124 * into that.
3125 */
3126
3127 new_i_size = pos + copied;
3128 if (new_i_size > EXT4_I(inode)->i_disksize) {
3129 if (ext4_da_should_update_i_disksize(page, end)) {
3130 down_write(&EXT4_I(inode)->i_data_sem);
3131 if (new_i_size > EXT4_I(inode)->i_disksize) {
3132 /*
3133 * Updating i_disksize when extending file
3134 * without needing block allocation
3135 */
3136 if (ext4_should_order_data(inode))
3137 ret = ext4_jbd2_file_inode(handle,
3138 inode);
3139
3140 EXT4_I(inode)->i_disksize = new_i_size;
3141 }
3142 up_write(&EXT4_I(inode)->i_data_sem);
3143 /* We need to mark the inode dirty even if
3144 * new_i_size is less than inode->i_size
3145 * but greater than i_disksize (hint: delalloc).
3146 */
3147 ext4_mark_inode_dirty(handle, inode);
3148 }
3149 }
3150 ret2 = generic_write_end(file, mapping, pos, len, copied,
3151 page, fsdata);
3152 copied = ret2;
3153 if (ret2 < 0)
3154 ret = ret2;
3155 ret2 = ext4_journal_stop(handle);
3156 if (!ret)
3157 ret = ret2;
3158
3159 return ret ? ret : copied;
3160 }
3161
3162 static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
3163 {
3164 /*
3165 * Drop reserved blocks
3166 */
3167 BUG_ON(!PageLocked(page));
3168 if (!page_has_buffers(page))
3169 goto out;
3170
3171 ext4_da_page_release_reservation(page, offset);
3172
3173 out:
3174 ext4_invalidatepage(page, offset);
3175
3176 return;
3177 }
3178
3179 /*
3180 * Force all delayed allocation blocks to be allocated for a given inode.
3181 */
3182 int ext4_alloc_da_blocks(struct inode *inode)
3183 {
3184 trace_ext4_alloc_da_blocks(inode);
3185
3186 if (!EXT4_I(inode)->i_reserved_data_blocks &&
3187 !EXT4_I(inode)->i_reserved_meta_blocks)
3188 return 0;
3189
3190 /*
3191 * We do something simple for now. The filemap_flush() will
3192 * also start triggering a write of the data blocks, which is
3193 * not strictly speaking necessary (and for users of
3194 * laptop_mode, not even desirable). However, to do otherwise
3195 * would require replicating code paths in:
3196 *
3197 * ext4_da_writepages() ->
3198 * write_cache_pages() ---> (via passed in callback function)
3199 * __mpage_da_writepage() -->
3200 * mpage_add_bh_to_extent()
3201 * mpage_da_map_blocks()
3202 *
3203 * The problem is that write_cache_pages(), located in
3204 * mm/page-writeback.c, marks pages clean in preparation for
3205 * doing I/O, which is not desirable if we're not planning on
3206 * doing I/O at all.
3207 *
3208 * We could call write_cache_pages(), and then redirty all of
3209 * the pages by calling redirty_page_for_writepage() but that
3210 * would be ugly in the extreme. So instead we would need to
3211 * replicate parts of the code in the above functions,
3212 * simplifying them because we wouldn't actually intend to
3213 * write out the pages, but rather only collect contiguous
3214 * logical block extents, call the multi-block allocator, and
3215 * then update the buffer heads with the block allocations.
3216 *
3217 * For now, though, we'll cheat by calling filemap_flush(),
3218 * which will map the blocks, and start the I/O, but not
3219 * actually wait for the I/O to complete.
3220 */
3221 return filemap_flush(inode->i_mapping);
3222 }
3223
3224 /*
3225 * bmap() is special. It gets used by applications such as lilo and by
3226 * the swapper to find the on-disk block of a specific piece of data.
3227 *
3228 * Naturally, this is dangerous if the block concerned is still in the
3229 * journal. If somebody makes a swapfile on an ext4 data-journaling
3230 * filesystem and enables swap, then they may get a nasty shock when the
3231 * data getting swapped to that swapfile suddenly gets overwritten by
3232 * the original zeros written out previously to the journal and
3233 * awaiting writeback in the kernel's buffer cache.
3234 *
3235 * So, if we see any bmap calls here on a modified, data-journaled file,
3236 * take extra steps to flush any blocks which might be in the cache.
3237 */
3238 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3239 {
3240 struct inode *inode = mapping->host;
3241 journal_t *journal;
3242 int err;
3243
3244 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
3245 test_opt(inode->i_sb, DELALLOC)) {
3246 /*
3247 * With delalloc we want to sync the file
3248 * so that we can make sure we allocate
3249 * blocks for file
3250 */
3251 filemap_write_and_wait(mapping);
3252 }
3253
3254 if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
3255 /*
3256 * This is a REALLY heavyweight approach, but the use of
3257 * bmap on dirty files is expected to be extremely rare:
3258 * only if we run lilo or swapon on a freshly made file
3259 * do we expect this to happen.
3260 *
3261 * (bmap requires CAP_SYS_RAWIO so this does not
3262 * represent an unprivileged user DOS attack --- we'd be
3263 * in trouble if mortal users could trigger this path at
3264 * will.)
3265 *
3266 * NB. EXT4_STATE_JDATA is not set on files other than
3267 * regular files. If somebody wants to bmap a directory
3268 * or symlink and gets confused because the buffer
3269 * hasn't yet been flushed to disk, they deserve
3270 * everything they get.
3271 */
3272
3273 EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
3274 journal = EXT4_JOURNAL(inode);
3275 jbd2_journal_lock_updates(journal);
3276 err = jbd2_journal_flush(journal);
3277 jbd2_journal_unlock_updates(journal);
3278
3279 if (err)
3280 return 0;
3281 }
3282
3283 return generic_block_bmap(mapping, block, ext4_get_block);
3284 }
3285
3286 static int ext4_readpage(struct file *file, struct page *page)
3287 {
3288 return mpage_readpage(page, ext4_get_block);
3289 }
3290
3291 static int
3292 ext4_readpages(struct file *file, struct address_space *mapping,
3293 struct list_head *pages, unsigned nr_pages)
3294 {
3295 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
3296 }
3297
3298 static void ext4_invalidatepage(struct page *page, unsigned long offset)
3299 {
3300 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3301
3302 /*
3303 * If it's a full truncate we just forget about the pending dirtying
3304 */
3305 if (offset == 0)
3306 ClearPageChecked(page);
3307
3308 if (journal)
3309 jbd2_journal_invalidatepage(journal, page, offset);
3310 else
3311 block_invalidatepage(page, offset);
3312 }
3313
3314 static int ext4_releasepage(struct page *page, gfp_t wait)
3315 {
3316 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3317
3318 WARN_ON(PageChecked(page));
3319 if (!page_has_buffers(page))
3320 return 0;
3321 if (journal)
3322 return jbd2_journal_try_to_free_buffers(journal, page, wait);
3323 else
3324 return try_to_free_buffers(page);
3325 }
3326
3327 /*
3328 * O_DIRECT for ext3 (or indirect map) based files
3329 *
3330 * If the O_DIRECT write will extend the file then add this inode to the
3331 * orphan list. So recovery will truncate it back to the original size
3332 * if the machine crashes during the write.
3333 *
3334 * If the O_DIRECT write is instantiating holes inside i_size and the machine
3335 * crashes then stale disk data _may_ be exposed inside the file. But current
3336 * VFS code falls back into buffered path in that case so we are safe.
3337 */
3338 static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
3339 const struct iovec *iov, loff_t offset,
3340 unsigned long nr_segs)
3341 {
3342 struct file *file = iocb->ki_filp;
3343 struct inode *inode = file->f_mapping->host;
3344 struct ext4_inode_info *ei = EXT4_I(inode);
3345 handle_t *handle;
3346 ssize_t ret;
3347 int orphan = 0;
3348 size_t count = iov_length(iov, nr_segs);
3349 int retries = 0;
3350
3351 if (rw == WRITE) {
3352 loff_t final_size = offset + count;
3353
3354 if (final_size > inode->i_size) {
3355 /* Credits for sb + inode write */
3356 handle = ext4_journal_start(inode, 2);
3357 if (IS_ERR(handle)) {
3358 ret = PTR_ERR(handle);
3359 goto out;
3360 }
3361 ret = ext4_orphan_add(handle, inode);
3362 if (ret) {
3363 ext4_journal_stop(handle);
3364 goto out;
3365 }
3366 orphan = 1;
3367 ei->i_disksize = inode->i_size;
3368 ext4_journal_stop(handle);
3369 }
3370 }
3371
3372 retry:
3373 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
3374 offset, nr_segs,
3375 ext4_get_block, NULL);
3376 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3377 goto retry;
3378
3379 if (orphan) {
3380 int err;
3381
3382 /* Credits for sb + inode write */
3383 handle = ext4_journal_start(inode, 2);
3384 if (IS_ERR(handle)) {
3385 /* This is really bad luck. We've written the data
3386 * but cannot extend i_size. Bail out and pretend
3387 * the write failed... */
3388 ret = PTR_ERR(handle);
3389 goto out;
3390 }
3391 if (inode->i_nlink)
3392 ext4_orphan_del(handle, inode);
3393 if (ret > 0) {
3394 loff_t end = offset + ret;
3395 if (end > inode->i_size) {
3396 ei->i_disksize = end;
3397 i_size_write(inode, end);
3398 /*
3399 * We're going to return a positive `ret'
3400 * here due to non-zero-length I/O, so there's
3401 * no way of reporting error returns from
3402 * ext4_mark_inode_dirty() to userspace. So
3403 * ignore it.
3404 */
3405 ext4_mark_inode_dirty(handle, inode);
3406 }
3407 }
3408 err = ext4_journal_stop(handle);
3409 if (ret == 0)
3410 ret = err;
3411 }
3412 out:
3413 return ret;
3414 }
3415
3416 static int ext4_get_block_dio_write(struct inode *inode, sector_t iblock,
3417 struct buffer_head *bh_result, int create)
3418 {
3419 handle_t *handle = NULL;
3420 int ret = 0;
3421 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
3422 int dio_credits;
3423
3424 ext4_debug("ext4_get_block_dio_write: inode %lu, create flag %d\n",
3425 inode->i_ino, create);
3426 /*
3427 * The DIO VFS code passes the create = 0 flag for writes to
3428 * the middle of the file. It does this to avoid block
3429 * allocation for holes, to prevent exposing stale data
3430 * when there is a parallel buffered read (which does
3431 * not hold the i_mutex lock) while the direct IO write has
3432 * not completed. DIO requests on holes finally fall back
3433 * to buffered IO for this reason.
3434 *
3435 * For ext4 extent-based files, since we support fallocate and
3436 * newly allocated extents are uninitialized, we can fallocate
3437 * blocks for holes; thus a parallel buffered read will zero out
3438 * the page when reading a hole while a parallel DIO write to the
3439 * hole has not completed.
3440 *
3441 * When we get here, we know it's a direct IO write to
3442 * the middle of the file (< i_size),
3443 * so it's safe to override the create flag from the VFS.
3444 */
3445 create = EXT4_GET_BLOCKS_DIO_CREATE_EXT;
3446
3447 if (max_blocks > DIO_MAX_BLOCKS)
3448 max_blocks = DIO_MAX_BLOCKS;
3449 dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
3450 handle = ext4_journal_start(inode, dio_credits);
3451 if (IS_ERR(handle)) {
3452 ret = PTR_ERR(handle);
3453 goto out;
3454 }
3455 ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
3456 create);
3457 if (ret > 0) {
3458 bh_result->b_size = (ret << inode->i_blkbits);
3459 ret = 0;
3460 }
3461 ext4_journal_stop(handle);
3462 out:
3463 return ret;
3464 }
3465
3466 static void ext4_free_io_end(ext4_io_end_t *io)
3467 {
3468 BUG_ON(!io);
3469 iput(io->inode);
3470 kfree(io);
3471 }
3472 static void dump_aio_dio_list(struct inode *inode)
3473 {
3474 #ifdef EXT4_DEBUG
3475 struct list_head *cur, *before, *after;
3476 ext4_io_end_t *io, *io0, *io1;
3477
3478 if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)) {
3479 ext4_debug("inode %lu aio dio list is empty\n", inode->i_ino);
3480 return;
3481 }
3482
3483 ext4_debug("Dump inode %lu aio_dio_completed_IO list \n", inode->i_ino);
3484 list_for_each_entry(io, &EXT4_I(inode)->i_aio_dio_complete_list, list) {
3485 cur = &io->list;
3486 before = cur->prev;
3487 io0 = container_of(before, ext4_io_end_t, list);
3488 after = cur->next;
3489 io1 = container_of(after, ext4_io_end_t, list);
3490
3491 ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
3492 io, inode->i_ino, io0, io1);
3493 }
3494 #endif
3495 }
3496
3497 /*
3498 * check a range of space and convert unwritten extents to written.
3499 */
3500 static int ext4_end_aio_dio_nolock(ext4_io_end_t *io)
3501 {
3502 struct inode *inode = io->inode;
3503 loff_t offset = io->offset;
3504 size_t size = io->size;
3505 int ret = 0;
3506
3507 ext4_debug("end_aio_dio_onlock: io 0x%p from inode %lu,list->next 0x%p,"
3508 "list->prev 0x%p\n",
3509 io, inode->i_ino, io->list.next, io->list.prev);
3510
3511 if (list_empty(&io->list))
3512 return ret;
3513
3514 if (io->flag != DIO_AIO_UNWRITTEN)
3515 return ret;
3516
3517 if (offset + size <= i_size_read(inode))
3518 ret = ext4_convert_unwritten_extents(inode, offset, size);
3519
3520 if (ret < 0) {
3521 printk(KERN_EMERG "%s: failed to convert unwritten "
3522 "extents to written extents, error is %d;"
3523 " io is still on inode %lu aio dio list\n",
3524 __func__, ret, inode->i_ino);
3525 return ret;
3526 }
3527
3528 /* clear the DIO AIO unwritten flag */
3529 io->flag = 0;
3530 return ret;
3531 }
3532 /*
3533 * work on completed aio dio IO, to convert unwritten extents to written extents
3534 */
3535 static void ext4_end_aio_dio_work(struct work_struct *work)
3536 {
3537 ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
3538 struct inode *inode = io->inode;
3539 int ret = 0;
3540
3541 mutex_lock(&inode->i_mutex);
3542 ret = ext4_end_aio_dio_nolock(io);
3543 if (ret >= 0) {
3544 if (!list_empty(&io->list))
3545 list_del_init(&io->list);
3546 ext4_free_io_end(io);
3547 }
3548 mutex_unlock(&inode->i_mutex);
3549 }
3550 /*
3551 * This function is called from ext4_sync_file().
3552 *
3553 * When an AIO DIO completes, the work to convert unwritten
3554 * extents to written is queued on a workqueue but may not get
3555 * scheduled immediately. When fsync is called, we need to ensure the
3556 * conversion is complete before fsync returns.
3557 * The inode keeps track of a list of completed AIO from the DIO path
3558 * that may still need the conversion. This function walks through
3559 * the list and converts the related unwritten extents to written.
3560 */
3561 int flush_aio_dio_completed_IO(struct inode *inode)
3562 {
3563 ext4_io_end_t *io;
3564 int ret = 0;
3565 int ret2 = 0;
3566
3567 if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list))
3568 return ret;
3569
3570 dump_aio_dio_list(inode);
3571 while (!list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)) {
3572 io = list_entry(EXT4_I(inode)->i_aio_dio_complete_list.next,
3573 ext4_io_end_t, list);
3574 /*
3575 * Call ext4_end_aio_dio_nolock() to convert completed
3576 * IO to written.
3577 *
3578 * When ext4_sync_file() is called, run_queue() may already be
3579 * about to flush the work corresponding to this io structure.
3580 * It will be confused if it finds that the io structure for
3581 * the work it is about to schedule has been freed.
3582 *
3583 * Thus we need to keep the io structure valid here even after
3584 * the conversion finishes. The io structure has a flag to
3585 * avoid double conversion from both fsync and the background
3586 * workqueue.
3587 */
3588 ret = ext4_end_aio_dio_nolock(io);
3589 if (ret < 0)
3590 ret2 = ret;
3591 else
3592 list_del_init(&io->list);
3593 }
3594 return (ret2 < 0) ? ret2 : 0;
3595 }
3596
3597 static ext4_io_end_t *ext4_init_io_end(struct inode *inode)
3598 {
3599 ext4_io_end_t *io = NULL;
3600
3601 io = kmalloc(sizeof(*io), GFP_NOFS);
3602
3603 if (io) {
3604 igrab(inode);
3605 io->inode = inode;
3606 io->flag = 0;
3607 io->offset = 0;
3608 io->size = 0;
3609 io->error = 0;
3610 INIT_WORK(&io->work, ext4_end_aio_dio_work);
3611 INIT_LIST_HEAD(&io->list);
3612 }
3613
3614 return io;
3615 }
3616
3617 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3618 ssize_t size, void *private)
3619 {
3620 ext4_io_end_t *io_end = iocb->private;
3621 struct workqueue_struct *wq;
3622
3623 /* if not async direct IO or a zero-byte DIO write, just return */
3624 if (!io_end || !size)
3625 return;
3626
3627 ext_debug("ext4_end_io_dio(): io_end 0x%p "
3628 "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
3629 iocb->private, io_end->inode->i_ino, iocb, offset,
3630 size);
3631
3632 /* if not aio dio with unwritten extents, just free io and return */
3633 if (io_end->flag != DIO_AIO_UNWRITTEN) {
3634 ext4_free_io_end(io_end);
3635 iocb->private = NULL;
3636 return;
3637 }
3638
3639 io_end->offset = offset;
3640 io_end->size = size;
3641 wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
3642
3643 /* queue the work to convert unwritten extents to written */
3644 queue_work(wq, &io_end->work);
3645
3646 /* Add the io_end to the per-inode completed aio dio list */
3647 list_add_tail(&io_end->list,
3648 &EXT4_I(io_end->inode)->i_aio_dio_complete_list);
3649 iocb->private = NULL;
3650 }
3651 /*
3652 * For ext4 extent files, ext4 will do direct-io writes to holes,
3653 * preallocated extents, and writes that extend the file, with no need
3654 * to fall back to buffered IO.
3655 *
3656 * For holes, we fallocate those blocks and mark them as uninitialized.
3657 * If the blocks were preallocated, we make sure they are split, but
3658 * still keep the range to be written as uninitialized.
3659 *
3660 * The unwritten extents will be converted to written when the DIO completes.
3661 * For async direct IO, since the IO may still be pending when we return, we
3662 * set up an end_io callback function, which will do the conversion
3663 * when the async direct IO completes.
3664 *
3665 * If the O_DIRECT write will extend the file then add this inode to the
3666 * orphan list. So recovery will truncate it back to the original size
3667 * if the machine crashes during the write.
3668 *
3669 */
3670 static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3671 const struct iovec *iov, loff_t offset,
3672 unsigned long nr_segs)
3673 {
3674 struct file *file = iocb->ki_filp;
3675 struct inode *inode = file->f_mapping->host;
3676 ssize_t ret;
3677 size_t count = iov_length(iov, nr_segs);
3678
3679 loff_t final_size = offset + count;
3680 if (rw == WRITE && final_size <= inode->i_size) {
3681 /*
3682 * We can do direct writes to holes and fallocated extents.
3683 *
3684 * Blocks allocated to fill a hole are marked as uninitialized
3685 * to prevent a parallel buffered read from exposing stale data
3686 * before the DIO completes the data IO.
3687 *
3688 * For previously fallocated extents, ext4's get_block
3689 * will simply mark the buffer mapped but still
3690 * keep the extents uninitialized.
3691 *
3692 * For the non-AIO case, we convert those unwritten extents
3693 * to written after returning from blockdev_direct_IO.
3694 *
3695 * For async DIO, the conversion needs to be deferred until
3696 * the IO completes. The ext4 end_io callback function
3697 * will be called to take care of the conversion work.
3698 * For the async case, we allocate an io_end structure here
3699 * and hook it to the iocb.
3700 */
3701 iocb->private = NULL;
3702 EXT4_I(inode)->cur_aio_dio = NULL;
3703 if (!is_sync_kiocb(iocb)) {
3704 iocb->private = ext4_init_io_end(inode);
3705 if (!iocb->private)
3706 return -ENOMEM;
3707 /*
3708 * We save the io structure for the current async
3709 * direct IO, so that ext4_get_blocks() can later
3710 * flag the io structure if there are unwritten
3711 * extents that need to be converted when the IO
3712 * is completed.
3713 */
3714 EXT4_I(inode)->cur_aio_dio = iocb->private;
3715 }
3716
3717 ret = blockdev_direct_IO(rw, iocb, inode,
3718 inode->i_sb->s_bdev, iov,
3719 offset, nr_segs,
3720 ext4_get_block_dio_write,
3721 ext4_end_io_dio);
3722 if (iocb->private)
3723 EXT4_I(inode)->cur_aio_dio = NULL;
3724 /*
3725 * The io_end structure takes a reference to the inode;
3726 * that structure needs to be destroyed and the
3727 * reference to the inode needs to be dropped when the IO
3728 * is complete, even for a zero-byte write or a failure.
3729 *
3730 * In the successful AIO DIO case, the io_end structure will be
3731 * destroyed and the reference to the inode will be dropped
3732 * after the end_io callback function is called.
3733 *
3734 * In the zero-byte write or error case, since the
3735 * VFS direct IO code won't invoke the end_io callback,
3736 * we need to free the io_end structure here.
3737 */
3738 if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
3739 ext4_free_io_end(iocb->private);
3740 iocb->private = NULL;
3741 } else if (ret > 0 && (EXT4_I(inode)->i_state &
3742 EXT4_STATE_DIO_UNWRITTEN)) {
3743 int err;
3744 /*
3745 * For the non-AIO case, since the IO has already
3746 * completed, we can do the conversion right here.
3747 */
3748 err = ext4_convert_unwritten_extents(inode,
3749 offset, ret);
3750 if (err < 0)
3751 ret = err;
3752 EXT4_I(inode)->i_state &= ~EXT4_STATE_DIO_UNWRITTEN;
3753 }
3754 return ret;
3755 }
3756
3757 /* for the write-beyond-end-of-file case, we fall back to the old way */
3758 return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3759 }
3760
3761 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3762 const struct iovec *iov, loff_t offset,
3763 unsigned long nr_segs)
3764 {
3765 struct file *file = iocb->ki_filp;
3766 struct inode *inode = file->f_mapping->host;
3767
3768 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
3769 return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
3770
3771 return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3772 }
3773
3774 /*
3775 * Pages can be marked dirty completely asynchronously from ext4's journalling
3776 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
3777 * much here because ->set_page_dirty is called under VFS locks. The page is
3778 * not necessarily locked.
3779 *
3780 * We cannot just dirty the page and leave attached buffers clean, because the
3781 * buffers' dirty state is "definitive". We cannot just set the buffers dirty
3782 * or jbddirty because all the journalling code will explode.
3783 *
3784 * So what we do is to mark the page "pending dirty" and next time writepage
3785 * is called, propagate that into the buffers appropriately.
3786 */
3787 static int ext4_journalled_set_page_dirty(struct page *page)
3788 {
3789 SetPageChecked(page);
3790 return __set_page_dirty_nobuffers(page);
3791 }
3792
3793 static const struct address_space_operations ext4_ordered_aops = {
3794 .readpage = ext4_readpage,
3795 .readpages = ext4_readpages,
3796 .writepage = ext4_writepage,
3797 .sync_page = block_sync_page,
3798 .write_begin = ext4_write_begin,
3799 .write_end = ext4_ordered_write_end,
3800 .bmap = ext4_bmap,
3801 .invalidatepage = ext4_invalidatepage,
3802 .releasepage = ext4_releasepage,
3803 .direct_IO = ext4_direct_IO,
3804 .migratepage = buffer_migrate_page,
3805 .is_partially_uptodate = block_is_partially_uptodate,
3806 .error_remove_page = generic_error_remove_page,
3807 };
3808
3809 static const struct address_space_operations ext4_writeback_aops = {
3810 .readpage = ext4_readpage,
3811 .readpages = ext4_readpages,
3812 .writepage = ext4_writepage,
3813 .sync_page = block_sync_page,
3814 .write_begin = ext4_write_begin,
3815 .write_end = ext4_writeback_write_end,
3816 .bmap = ext4_bmap,
3817 .invalidatepage = ext4_invalidatepage,
3818 .releasepage = ext4_releasepage,
3819 .direct_IO = ext4_direct_IO,
3820 .migratepage = buffer_migrate_page,
3821 .is_partially_uptodate = block_is_partially_uptodate,
3822 .error_remove_page = generic_error_remove_page,
3823 };
3824
3825 static const struct address_space_operations ext4_journalled_aops = {
3826 .readpage = ext4_readpage,
3827 .readpages = ext4_readpages,
3828 .writepage = ext4_writepage,
3829 .sync_page = block_sync_page,
3830 .write_begin = ext4_write_begin,
3831 .write_end = ext4_journalled_write_end,
3832 .set_page_dirty = ext4_journalled_set_page_dirty,
3833 .bmap = ext4_bmap,
3834 .invalidatepage = ext4_invalidatepage,
3835 .releasepage = ext4_releasepage,
3836 .is_partially_uptodate = block_is_partially_uptodate,
3837 .error_remove_page = generic_error_remove_page,
3838 };
3839
3840 static const struct address_space_operations ext4_da_aops = {
3841 .readpage = ext4_readpage,
3842 .readpages = ext4_readpages,
3843 .writepage = ext4_writepage,
3844 .writepages = ext4_da_writepages,
3845 .sync_page = block_sync_page,
3846 .write_begin = ext4_da_write_begin,
3847 .write_end = ext4_da_write_end,
3848 .bmap = ext4_bmap,
3849 .invalidatepage = ext4_da_invalidatepage,
3850 .releasepage = ext4_releasepage,
3851 .direct_IO = ext4_direct_IO,
3852 .migratepage = buffer_migrate_page,
3853 .is_partially_uptodate = block_is_partially_uptodate,
3854 .error_remove_page = generic_error_remove_page,
3855 };
3856
3857 void ext4_set_aops(struct inode *inode)
3858 {
3859 if (ext4_should_order_data(inode) &&
3860 test_opt(inode->i_sb, DELALLOC))
3861 inode->i_mapping->a_ops = &ext4_da_aops;
3862 else if (ext4_should_order_data(inode))
3863 inode->i_mapping->a_ops = &ext4_ordered_aops;
3864 else if (ext4_should_writeback_data(inode) &&
3865 test_opt(inode->i_sb, DELALLOC))
3866 inode->i_mapping->a_ops = &ext4_da_aops;
3867 else if (ext4_should_writeback_data(inode))
3868 inode->i_mapping->a_ops = &ext4_writeback_aops;
3869 else
3870 inode->i_mapping->a_ops = &ext4_journalled_aops;
3871 }
3872
3873 /*
3874 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3875 * up to the end of the block which corresponds to `from'.
3876 * This is required during truncate. We need to physically zero the tail end
3877 * of that block so it doesn't yield old data if the file is later grown.
3878 */
3879 int ext4_block_truncate_page(handle_t *handle,
3880 struct address_space *mapping, loff_t from)
3881 {
3882 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3883 unsigned offset = from & (PAGE_CACHE_SIZE-1);
3884 unsigned blocksize, length, pos;
3885 ext4_lblk_t iblock;
3886 struct inode *inode = mapping->host;
3887 struct buffer_head *bh;
3888 struct page *page;
3889 int err = 0;
3890
3891 page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
3892 mapping_gfp_mask(mapping) & ~__GFP_FS);
3893 if (!page)
3894 return -EINVAL;
3895
3896 blocksize = inode->i_sb->s_blocksize;
3897 length = blocksize - (offset & (blocksize - 1));
3898 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3899
3900 /*
3901 * For "nobh" option, we can only work if we don't need to
3902 * read-in the page - otherwise we create buffers to do the IO.
3903 */
3904 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
3905 ext4_should_writeback_data(inode) && PageUptodate(page)) {
3906 zero_user(page, offset, length);
3907 set_page_dirty(page);
3908 goto unlock;
3909 }
3910
3911 if (!page_has_buffers(page))
3912 create_empty_buffers(page, blocksize, 0);
3913
3914 /* Find the buffer that contains "offset" */
3915 bh = page_buffers(page);
3916 pos = blocksize;
3917 while (offset >= pos) {
3918 bh = bh->b_this_page;
3919 iblock++;
3920 pos += blocksize;
3921 }
3922
3923 err = 0;
3924 if (buffer_freed(bh)) {
3925 BUFFER_TRACE(bh, "freed: skip");
3926 goto unlock;
3927 }
3928
3929 if (!buffer_mapped(bh)) {
3930 BUFFER_TRACE(bh, "unmapped");
3931 ext4_get_block(inode, iblock, bh, 0);
3932 /* unmapped? It's a hole - nothing to do */
3933 if (!buffer_mapped(bh)) {
3934 BUFFER_TRACE(bh, "still unmapped");
3935 goto unlock;
3936 }
3937 }
3938
3939 /* Ok, it's mapped. Make sure it's up-to-date */
3940 if (PageUptodate(page))
3941 set_buffer_uptodate(bh);
3942
3943 if (!buffer_uptodate(bh)) {
3944 err = -EIO;
3945 ll_rw_block(READ, 1, &bh);
3946 wait_on_buffer(bh);
3947 /* Uhhuh. Read error. Complain and punt. */
3948 if (!buffer_uptodate(bh))
3949 goto unlock;
3950 }
3951
3952 if (ext4_should_journal_data(inode)) {
3953 BUFFER_TRACE(bh, "get write access");
3954 err = ext4_journal_get_write_access(handle, bh);
3955 if (err)
3956 goto unlock;
3957 }
3958
3959 zero_user(page, offset, length);
3960
3961 BUFFER_TRACE(bh, "zeroed end of block");
3962
3963 err = 0;
3964 if (ext4_should_journal_data(inode)) {
3965 err = ext4_handle_dirty_metadata(handle, inode, bh);
3966 } else {
3967 if (ext4_should_order_data(inode))
3968 err = ext4_jbd2_file_inode(handle, inode);
3969 mark_buffer_dirty(bh);
3970 }
3971
3972 unlock:
3973 unlock_page(page);
3974 page_cache_release(page);
3975 return err;
3976 }
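/*
 * Worked example (illustrative, not part of the original code): assuming
 * blocksize == 4096 and PAGE_CACHE_SIZE == 4096, truncating at
 * from == 10000 gives offset = 10000 & 4095 = 1808 and
 * length = 4096 - (1808 & 4095) = 2288, so zero_user() clears bytes
 * 1808..4095 of the partial block, and the old tail data cannot
 * reappear if the file is later grown.
 */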
3977
3978 /*
3979 * Probably it should be a library function... search for first non-zero word
3980 * or memcmp with zero_page, whatever is better for particular architecture.
3981 * Linus?
3982 */
3983 static inline int all_zeroes(__le32 *p, __le32 *q)
3984 {
3985 while (p < q)
3986 if (*p++)
3987 return 0;
3988 return 1;
3989 }
3990
3991 /**
3992 * ext4_find_shared - find the indirect blocks for partial truncation.
3993 * @inode: inode in question
3994 * @depth: depth of the affected branch
3995 * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
3996 * @chain: place to store the pointers to partial indirect blocks
3997 * @top: place to store the (detached) top of the branch
3998 *
3999 * This is a helper function used by ext4_truncate().
4000 *
4001 * When we do truncate() we may have to clean the ends of several
4002 * indirect blocks but leave the blocks themselves alive. Block is
4003 * partially truncated if some data below the new i_size is referred to
4004 * from it (and it is on the path to the first completely truncated
4005 * data block, indeed). We have to free the top of that path along
4006 * with everything to the right of the path. Since no allocation
4007 * past the truncation point is possible until ext4_truncate()
4008 * finishes, we may safely do the latter, but top of branch may
4009 * require special attention - pageout below the truncation point
4010 * might try to populate it.
4011 *
4012 * We atomically detach the top of branch from the tree, store the
4013 * block number of its root in *@top, pointers to buffer_heads of
4014 * partially truncated blocks - in @chain[].bh and pointers to
4015 * their last elements that should not be removed - in
4016 * @chain[].p. Return value is the pointer to last filled element
4017 * of @chain.
4018 *
4019 * The caller is left to do the actual freeing of the subtrees:
4020 * a) free the subtree starting from *@top
4021 * b) free the subtrees whose roots are stored in
4022 * (@chain[i].p+1 .. end of @chain[i].bh->b_data)
4023 * c) free the subtrees growing from the inode past the @chain[0].
4024 * (no partially truncated stuff there). */
4025
4026 static Indirect *ext4_find_shared(struct inode *inode, int depth,
4027 ext4_lblk_t offsets[4], Indirect chain[4],
4028 __le32 *top)
4029 {
4030 Indirect *partial, *p;
4031 int k, err;
4032
4033 *top = 0;
4034 /* Make k index the deepest non-null offset + 1 */
4035 for (k = depth; k > 1 && !offsets[k-1]; k--)
4036 ;
4037 partial = ext4_get_branch(inode, k, offsets, chain, &err);
4038 /* Writer: pointers */
4039 if (!partial)
4040 partial = chain + k-1;
4041 /*
4042 * If the branch acquired continuation since we've looked at it -
4043 * fine, it should all survive and (new) top doesn't belong to us.
4044 */
4045 if (!partial->key && *partial->p)
4046 /* Writer: end */
4047 goto no_top;
4048 for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
4049 ;
4050 /*
4051 * OK, we've found the last block that must survive. The rest of our
4052 * branch should be detached before unlocking. However, if that rest
4053 * of branch is all ours and does not grow immediately from the inode
4054 * it's easier to cheat and just decrement partial->p.
4055 */
4056 if (p == chain + k - 1 && p > chain) {
4057 p->p--;
4058 } else {
4059 *top = *p->p;
4060 /* Nope, don't do this in ext4. Must leave the tree intact */
4061 #if 0
4062 *p->p = 0;
4063 #endif
4064 }
4065 /* Writer: end */
4066
4067 while (partial > p) {
4068 brelse(partial->bh);
4069 partial--;
4070 }
4071 no_top:
4072 return partial;
4073 }
4074
4075 /*
4076 * Zero a number of block pointers in either an inode or an indirect block.
4077 * If we restart the transaction we must again get write access to the
4078 * indirect block for further modification.
4079 *
4080 * We release `count' blocks on disk, but (last - first) may be greater
4081 * than `count' because there can be holes in there.
4082 */
4083 static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
4084 struct buffer_head *bh,
4085 ext4_fsblk_t block_to_free,
4086 unsigned long count, __le32 *first,
4087 __le32 *last)
4088 {
4089 __le32 *p;
4090 int flags = EXT4_FREE_BLOCKS_FORGET;
4091
4092 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
4093 flags |= EXT4_FREE_BLOCKS_METADATA;
4094
4095 if (try_to_extend_transaction(handle, inode)) {
4096 if (bh) {
4097 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4098 ext4_handle_dirty_metadata(handle, inode, bh);
4099 }
4100 ext4_mark_inode_dirty(handle, inode);
4101 ext4_truncate_restart_trans(handle, inode,
4102 blocks_for_truncate(inode));
4103 if (bh) {
4104 BUFFER_TRACE(bh, "retaking write access");
4105 ext4_journal_get_write_access(handle, bh);
4106 }
4107 }
4108
4109 for (p = first; p < last; p++)
4110 *p = 0;
4111
4112 ext4_free_blocks(handle, inode, 0, block_to_free, count, flags);
4113 }
4114
4115 /**
4116 * ext4_free_data - free a list of data blocks
4117 * @handle: handle for this transaction
4118 * @inode: inode we are dealing with
4119 * @this_bh: indirect buffer_head which contains *@first and *@last
4120 * @first: array of block numbers
4121 * @last: points immediately past the end of array
4122 *
4123 * We are freeing all blocks referred to from that array (numbers are stored as
4124 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
4125 *
4126 * We accumulate contiguous runs of blocks to free. Conveniently, if these
4127 * blocks are contiguous then releasing them at one time will only affect one
4128 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
4129 * actually use a lot of journal space.
4130 *
4131 * @this_bh will be %NULL if @first and @last point into the inode's direct
4132 * block pointers.
4133 */
4134 static void ext4_free_data(handle_t *handle, struct inode *inode,
4135 struct buffer_head *this_bh,
4136 __le32 *first, __le32 *last)
4137 {
4138 ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */
4139 unsigned long count = 0; /* Number of blocks in the run */
4140 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
4141 corresponding to
4142 block_to_free */
4143 ext4_fsblk_t nr; /* Current block # */
4144 __le32 *p; /* Pointer into inode/ind
4145 for current block */
4146 int err;
4147
4148 if (this_bh) { /* For indirect block */
4149 BUFFER_TRACE(this_bh, "get_write_access");
4150 err = ext4_journal_get_write_access(handle, this_bh);
4151 /* Important: if we can't update the indirect pointers
4152 * to the blocks, we can't free them. */
4153 if (err)
4154 return;
4155 }
4156
4157 for (p = first; p < last; p++) {
4158 nr = le32_to_cpu(*p);
4159 if (nr) {
4160 /* accumulate blocks to free if they're contiguous */
4161 if (count == 0) {
4162 block_to_free = nr;
4163 block_to_free_p = p;
4164 count = 1;
4165 } else if (nr == block_to_free + count) {
4166 count++;
4167 } else {
4168 ext4_clear_blocks(handle, inode, this_bh,
4169 block_to_free,
4170 count, block_to_free_p, p);
4171 block_to_free = nr;
4172 block_to_free_p = p;
4173 count = 1;
4174 }
4175 }
4176 }
4177
4178 if (count > 0)
4179 ext4_clear_blocks(handle, inode, this_bh, block_to_free,
4180 count, block_to_free_p, p);
4181
4182 if (this_bh) {
4183 BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
4184
4185 /*
4186 * The buffer head should have an attached journal head at this
4187 * point. However, if the data is corrupted and an indirect
4188 * block pointed to itself, it would have been detached when
4189 * the block was cleared. Check for this instead of OOPSing.
4190 */
4191 if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
4192 ext4_handle_dirty_metadata(handle, inode, this_bh);
4193 else
4194 ext4_error(inode->i_sb, __func__,
4195 "circular indirect block detected, "
4196 "inode=%lu, block=%llu",
4197 inode->i_ino,
4198 (unsigned long long) this_bh->b_blocknr);
4199 }
4200 }
4201
4202 /**
4203 * ext4_free_branches - free an array of branches
4204 * @handle: JBD handle for this transaction
4205 * @inode: inode we are dealing with
4206 * @parent_bh: the buffer_head which contains *@first and *@last
4207 * @first: array of block numbers
4208 * @last: pointer immediately past the end of array
4209 * @depth: depth of the branches to free
4210 *
4211 * We are freeing all blocks referred to from these branches (numbers are
4212 * stored as little-endian 32-bit) and updating @inode->i_blocks
4213 * appropriately.
4214 */
4215 static void ext4_free_branches(handle_t *handle, struct inode *inode,
4216 struct buffer_head *parent_bh,
4217 __le32 *first, __le32 *last, int depth)
4218 {
4219 ext4_fsblk_t nr;
4220 __le32 *p;
4221
4222 if (ext4_handle_is_aborted(handle))
4223 return;
4224
4225 if (depth--) {
4226 struct buffer_head *bh;
4227 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
4228 p = last;
4229 while (--p >= first) {
4230 nr = le32_to_cpu(*p);
4231 if (!nr)
4232 continue; /* A hole */
4233
4234 /* Go read the buffer for the next level down */
4235 bh = sb_bread(inode->i_sb, nr);
4236
4237 /*
4238 * A read failure? Report error and clear slot
4239 * (should be rare).
4240 */
4241 if (!bh) {
4242 ext4_error(inode->i_sb, "ext4_free_branches",
4243 "Read failure, inode=%lu, block=%llu",
4244 inode->i_ino, nr);
4245 continue;
4246 }
4247
4248 /* This zaps the entire block. Bottom up. */
4249 BUFFER_TRACE(bh, "free child branches");
4250 ext4_free_branches(handle, inode, bh,
4251 (__le32 *) bh->b_data,
4252 (__le32 *) bh->b_data + addr_per_block,
4253 depth);
4254
4255 /*
4256 * We've probably journalled the indirect block several
4257 * times during the truncate. But it's no longer
4258 * needed and we now drop it from the transaction via
4259 * jbd2_journal_revoke().
4260 *
4261 * That's easy if it's exclusively part of this
4262 * transaction. But if it's part of the committing
4263 * transaction then jbd2_journal_forget() will simply
4264 * brelse() it. That means that if the underlying
4265 * block is reallocated in ext4_get_block(),
4266 * unmap_underlying_metadata() will find this block
4267 * and will try to get rid of it. damn, damn.
4268 *
4269 * If this block has already been committed to the
4270 * journal, a revoke record will be written. And
4271 * revoke records must be emitted *before* clearing
4272 * this block's bit in the bitmaps.
4273 */
4274 ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
4275
4276 /*
4277 * Everything below this pointer has been
4278 * released. Now let this top-of-subtree go.
4279 *
4280 * We want the freeing of this indirect block to be
4281 * atomic in the journal with the updating of the
4282 * bitmap block which owns it. So make some room in
4283 * the journal.
4284 *
4285 * We zero the parent pointer *after* freeing its
4286 * pointee in the bitmaps, so if extend_transaction()
4287 * for some reason fails to put the bitmap changes and
4288 * the release into the same transaction, recovery
4289 * will merely complain about releasing a free block,
4290 * rather than leaking blocks.
4291 */
4292 if (ext4_handle_is_aborted(handle))
4293 return;
4294 if (try_to_extend_transaction(handle, inode)) {
4295 ext4_mark_inode_dirty(handle, inode);
4296 ext4_truncate_restart_trans(handle, inode,
4297 blocks_for_truncate(inode));
4298 }
4299
4300 ext4_free_blocks(handle, inode, 0, nr, 1,
4301 EXT4_FREE_BLOCKS_METADATA);
4302
4303 if (parent_bh) {
4304 /*
4305 * The block which we have just freed is
4306 * pointed to by an indirect block: journal it
4307 */
4308 BUFFER_TRACE(parent_bh, "get_write_access");
4309 if (!ext4_journal_get_write_access(handle,
4310 parent_bh)){
4311 *p = 0;
4312 BUFFER_TRACE(parent_bh,
4313 "call ext4_handle_dirty_metadata");
4314 ext4_handle_dirty_metadata(handle,
4315 inode,
4316 parent_bh);
4317 }
4318 }
4319 }
4320 } else {
4321 /* We have reached the bottom of the tree. */
4322 BUFFER_TRACE(parent_bh, "free data blocks");
4323 ext4_free_data(handle, inode, parent_bh, first, last);
4324 }
4325 }
4326
4327 int ext4_can_truncate(struct inode *inode)
4328 {
4329 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4330 return 0;
4331 if (S_ISREG(inode->i_mode))
4332 return 1;
4333 if (S_ISDIR(inode->i_mode))
4334 return 1;
4335 if (S_ISLNK(inode->i_mode))
4336 return !ext4_inode_is_fast_symlink(inode);
4337 return 0;
4338 }
4339
4340 /*
4341 * ext4_truncate()
4342 *
4343 * We block out ext4_get_block() block instantiations across the entire
4344 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
4345 * simultaneously on behalf of the same inode.
4346 *
4347 * As we work through the truncate and commit bits of it to the journal there
4348 * is one core, guiding principle: the file's tree must always be consistent on
4349 * disk. We must be able to restart the truncate after a crash.
4350 *
4351 * The file's tree may be transiently inconsistent in memory (although it
4352 * probably isn't), but whenever we close off and commit a journal transaction,
4353 * the contents of (the filesystem + the journal) must be consistent and
4354 * restartable. It's pretty simple, really: bottom up, right to left (although
4355 * left-to-right works OK too).
4356 *
4357 * Note that at recovery time, journal replay occurs *before* the restart of
4358 * truncate against the orphan inode list.
4359 *
4360 * The committed inode has the new, desired i_size (which is the same as
4361 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
4362 * that this inode's truncate did not complete and it will again call
4363 * ext4_truncate() to have another go. So there will be instantiated blocks
4364 * to the right of the truncation point in a crashed ext4 filesystem. But
4365 * that's fine - as long as they are linked from the inode, the post-crash
4366 * ext4_truncate() run will find them and release them.
4367 */
4368 void ext4_truncate(struct inode *inode)
4369 {
4370 handle_t *handle;
4371 struct ext4_inode_info *ei = EXT4_I(inode);
4372 __le32 *i_data = ei->i_data;
4373 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
4374 struct address_space *mapping = inode->i_mapping;
4375 ext4_lblk_t offsets[4];
4376 Indirect chain[4];
4377 Indirect *partial;
4378 __le32 nr = 0;
4379 int n;
4380 ext4_lblk_t last_block;
4381 unsigned blocksize = inode->i_sb->s_blocksize;
4382
4383 if (!ext4_can_truncate(inode))
4384 return;
4385
4386 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4387 ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE;
4388
4389 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
4390 ext4_ext_truncate(inode);
4391 return;
4392 }
4393
4394 handle = start_transaction(inode);
4395 if (IS_ERR(handle))
4396 return; /* AKPM: return what? */
4397
4398 last_block = (inode->i_size + blocksize-1)
4399 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
4400
4401 if (inode->i_size & (blocksize - 1))
4402 if (ext4_block_truncate_page(handle, mapping, inode->i_size))
4403 goto out_stop;
4404
4405 n = ext4_block_to_path(inode, last_block, offsets, NULL);
4406 if (n == 0)
4407 goto out_stop; /* error */
4408
4409 /*
4410 * OK. This truncate is going to happen. We add the inode to the
4411 * orphan list, so that if this truncate spans multiple transactions,
4412 * and we crash, we will resume the truncate when the filesystem
4413 * recovers. It also marks the inode dirty, to catch the new size.
4414 *
4415 * Implication: the file must always be in a sane, consistent
4416 * truncatable state while each transaction commits.
4417 */
4418 if (ext4_orphan_add(handle, inode))
4419 goto out_stop;
4420
4421 /*
4422 * From here we block out all ext4_get_block() callers who want to
4423 * modify the block allocation tree.
4424 */
4425 down_write(&ei->i_data_sem);
4426
4427 ext4_discard_preallocations(inode);
4428
4429 /*
4430 * The orphan list entry will now protect us from any crash which
4431 * occurs before the truncate completes, so it is now safe to propagate
4432 * the new, shorter inode size (held for now in i_size) into the
4433 * on-disk inode. We do this via i_disksize, which is the value which
4434 * ext4 *really* writes onto the disk inode.
4435 */
4436 ei->i_disksize = inode->i_size;
4437
4438 if (n == 1) { /* direct blocks */
4439 ext4_free_data(handle, inode, NULL, i_data+offsets[0],
4440 i_data + EXT4_NDIR_BLOCKS);
4441 goto do_indirects;
4442 }
4443
4444 partial = ext4_find_shared(inode, n, offsets, chain, &nr);
4445 /* Kill the top of shared branch (not detached) */
4446 if (nr) {
4447 if (partial == chain) {
4448 /* Shared branch grows from the inode */
4449 ext4_free_branches(handle, inode, NULL,
4450 &nr, &nr+1, (chain+n-1) - partial);
4451 *partial->p = 0;
4452 /*
4453 * We mark the inode dirty prior to restart,
4454 * and prior to stop. No need for it here.
4455 */
4456 } else {
4457 /* Shared branch grows from an indirect block */
4458 BUFFER_TRACE(partial->bh, "get_write_access");
4459 ext4_free_branches(handle, inode, partial->bh,
4460 partial->p,
4461 partial->p+1, (chain+n-1) - partial);
4462 }
4463 }
4464 /* Clear the ends of indirect blocks on the shared branch */
4465 while (partial > chain) {
4466 ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
4467 (__le32*)partial->bh->b_data+addr_per_block,
4468 (chain+n-1) - partial);
4469 BUFFER_TRACE(partial->bh, "call brelse");
4470 brelse(partial->bh);
4471 partial--;
4472 }
4473 do_indirects:
4474 /* Kill the remaining (whole) subtrees */
4475 switch (offsets[0]) {
4476 default:
4477 nr = i_data[EXT4_IND_BLOCK];
4478 if (nr) {
4479 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
4480 i_data[EXT4_IND_BLOCK] = 0;
4481 }
4482 case EXT4_IND_BLOCK:
4483 nr = i_data[EXT4_DIND_BLOCK];
4484 if (nr) {
4485 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
4486 i_data[EXT4_DIND_BLOCK] = 0;
4487 }
4488 case EXT4_DIND_BLOCK:
4489 nr = i_data[EXT4_TIND_BLOCK];
4490 if (nr) {
4491 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
4492 i_data[EXT4_TIND_BLOCK] = 0;
4493 }
4494 case EXT4_TIND_BLOCK:
4495 ;
4496 }
4497
4498 up_write(&ei->i_data_sem);
4499 inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4500 ext4_mark_inode_dirty(handle, inode);
4501
4502 /*
4503 * In a multi-transaction truncate, we only make the final transaction
4504 * synchronous
4505 */
4506 if (IS_SYNC(inode))
4507 ext4_handle_sync(handle);
4508 out_stop:
4509 /*
4510 * If this was a simple ftruncate(), and the file will remain alive
4511 * then we need to clear up the orphan record which we created above.
4512 * However, if this was a real unlink then we were called by
4513 * ext4_delete_inode(), and we allow that function to clean up the
4514 * orphan info for us.
4515 */
4516 if (inode->i_nlink)
4517 ext4_orphan_del(handle, inode);
4518
4519 ext4_journal_stop(handle);
4520 }
4521
4522 /*
4523 * ext4_get_inode_loc returns with an extra refcount against the inode's
4524 * underlying buffer_head on success. If 'in_mem' is true, we have all
4525 * data in memory that is needed to recreate the on-disk version of this
4526 * inode.
4527 */
4528 static int __ext4_get_inode_loc(struct inode *inode,
4529 struct ext4_iloc *iloc, int in_mem)
4530 {
4531 struct ext4_group_desc *gdp;
4532 struct buffer_head *bh;
4533 struct super_block *sb = inode->i_sb;
4534 ext4_fsblk_t block;
4535 int inodes_per_block, inode_offset;
4536
4537 iloc->bh = NULL;
4538 if (!ext4_valid_inum(sb, inode->i_ino))
4539 return -EIO;
4540
4541 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
4542 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
4543 if (!gdp)
4544 return -EIO;
4545
4546 /*
4547 * Figure out the offset within the block group inode table
4548 */
4549 inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb));
4550 inode_offset = ((inode->i_ino - 1) %
4551 EXT4_INODES_PER_GROUP(sb));
4552 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
4553 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
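	/*
	 * Worked example (illustrative, not from the original source):
	 * assuming 4KB blocks and 256-byte inodes, inodes_per_block == 16.
	 * For ino == 50 with 8192 inodes per group: block_group == 0 and
	 * inode_offset == 49, so block == inode_table + 49/16 == table + 3
	 * and iloc->offset == (49 % 16) * 256 == 256.
	 */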
4554
4555 bh = sb_getblk(sb, block);
4556 if (!bh) {
4557 ext4_error(sb, "ext4_get_inode_loc", "unable to read "
4558 "inode block - inode=%lu, block=%llu",
4559 inode->i_ino, block);
4560 return -EIO;
4561 }
4562 if (!buffer_uptodate(bh)) {
4563 lock_buffer(bh);
4564
4565 /*
4566 * If the buffer has the write error flag, we have failed
4567 * to write out another inode in the same block. In this
4568 * case, we don't have to read the block because we may
4569 * read the old inode data successfully.
4570 */
4571 if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
4572 set_buffer_uptodate(bh);
4573
4574 if (buffer_uptodate(bh)) {
4575 /* someone brought it uptodate while we waited */
4576 unlock_buffer(bh);
4577 goto has_buffer;
4578 }
4579
4580 /*
4581 * If we have all information of the inode in memory and this
4582 * is the only valid inode in the block, we need not read the
4583 * block.
4584 */
4585 if (in_mem) {
4586 struct buffer_head *bitmap_bh;
4587 int i, start;
4588
4589 start = inode_offset & ~(inodes_per_block - 1);
4590
4591 /* Is the inode bitmap in cache? */
4592 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
4593 if (!bitmap_bh)
4594 goto make_io;
4595
4596 /*
4597 * If the inode bitmap isn't in cache then the
4598 * optimisation may end up performing two reads instead
4599 * of one, so skip it.
4600 */
4601 if (!buffer_uptodate(bitmap_bh)) {
4602 brelse(bitmap_bh);
4603 goto make_io;
4604 }
4605 for (i = start; i < start + inodes_per_block; i++) {
4606 if (i == inode_offset)
4607 continue;
4608 if (ext4_test_bit(i, bitmap_bh->b_data))
4609 break;
4610 }
4611 brelse(bitmap_bh);
4612 if (i == start + inodes_per_block) {
4613 /* all other inodes are free, so skip I/O */
4614 memset(bh->b_data, 0, bh->b_size);
4615 set_buffer_uptodate(bh);
4616 unlock_buffer(bh);
4617 goto has_buffer;
4618 }
4619 }
4620
4621 make_io:
4622 /*
4623 * If we need to do any I/O, try to pre-readahead extra
4624 * blocks from the inode table.
4625 */
4626 if (EXT4_SB(sb)->s_inode_readahead_blks) {
4627 ext4_fsblk_t b, end, table;
4628 unsigned num;
4629
4630 table = ext4_inode_table(sb, gdp);
4631 /* s_inode_readahead_blks is always a power of 2 */
4632 b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
4633 if (table > b)
4634 b = table;
4635 end = b + EXT4_SB(sb)->s_inode_readahead_blks;
4636 num = EXT4_INODES_PER_GROUP(sb);
4637 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4638 EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
4639 num -= ext4_itable_unused_count(sb, gdp);
4640 table += num / inodes_per_block;
4641 if (end > table)
4642 end = table;
4643 while (b <= end)
4644 sb_breadahead(sb, b++);
4645 }
4646
4647 /*
4648 * There are other valid inodes in the buffer, this inode
4649 * has in-inode xattrs, or we don't have this inode in memory.
4650 * Read the block from disk.
4651 */
4652 get_bh(bh);
4653 bh->b_end_io = end_buffer_read_sync;
4654 submit_bh(READ_META, bh);
4655 wait_on_buffer(bh);
4656 if (!buffer_uptodate(bh)) {
4657 ext4_error(sb, __func__,
4658 "unable to read inode block - inode=%lu, "
4659 "block=%llu", inode->i_ino, block);
4660 brelse(bh);
4661 return -EIO;
4662 }
4663 }
4664 has_buffer:
4665 iloc->bh = bh;
4666 return 0;
4667 }
4668
4669 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4670 {
4671 /* We have all inode data except xattrs in memory here. */
4672 return __ext4_get_inode_loc(inode, iloc,
4673 !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
4674 }
4675
4676 void ext4_set_inode_flags(struct inode *inode)
4677 {
4678 unsigned int flags = EXT4_I(inode)->i_flags;
4679
4680 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
4681 if (flags & EXT4_SYNC_FL)
4682 inode->i_flags |= S_SYNC;
4683 if (flags & EXT4_APPEND_FL)
4684 inode->i_flags |= S_APPEND;
4685 if (flags & EXT4_IMMUTABLE_FL)
4686 inode->i_flags |= S_IMMUTABLE;
4687 if (flags & EXT4_NOATIME_FL)
4688 inode->i_flags |= S_NOATIME;
4689 if (flags & EXT4_DIRSYNC_FL)
4690 inode->i_flags |= S_DIRSYNC;
4691 }
4692
4693 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
4694 void ext4_get_inode_flags(struct ext4_inode_info *ei)
4695 {
4696 unsigned int flags = ei->vfs_inode.i_flags;
4697
4698 ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
4699 EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
4700 if (flags & S_SYNC)
4701 ei->i_flags |= EXT4_SYNC_FL;
4702 if (flags & S_APPEND)
4703 ei->i_flags |= EXT4_APPEND_FL;
4704 if (flags & S_IMMUTABLE)
4705 ei->i_flags |= EXT4_IMMUTABLE_FL;
4706 if (flags & S_NOATIME)
4707 ei->i_flags |= EXT4_NOATIME_FL;
4708 if (flags & S_DIRSYNC)
4709 ei->i_flags |= EXT4_DIRSYNC_FL;
4710 }
4711
4712 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
4713 struct ext4_inode_info *ei)
4714 {
4715 blkcnt_t i_blocks;
4716 struct inode *inode = &(ei->vfs_inode);
4717 struct super_block *sb = inode->i_sb;
4718
4719 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4720 EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
4721 /* we are using a combined 48 bit field */
4722 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4723 le32_to_cpu(raw_inode->i_blocks_lo);
4724 if (ei->i_flags & EXT4_HUGE_FILE_FL) {
4725 /* i_blocks is in units of the file system block size */
4726 return i_blocks << (inode->i_blkbits - 9);
4727 } else {
4728 return i_blocks;
4729 }
4730 } else {
4731 return le32_to_cpu(raw_inode->i_blocks_lo);
4732 }
4733 }
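/*
 * Worked example (illustrative, not part of the original code): with
 * i_blocks_high == 0x0001 and i_blocks_lo == 0x00000010, the combined
 * 48-bit value is (1ULL << 32) | 0x10 == 4294967312 512-byte units.
 * If EXT4_HUGE_FILE_FL is also set and the block size is 4KB
 * (i_blkbits == 12), the fields count filesystem blocks instead, and
 * the result is shifted left by 12 - 9 == 3, i.e. multiplied by 8.
 */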
4734
4735 struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4736 {
4737 struct ext4_iloc iloc;
4738 struct ext4_inode *raw_inode;
4739 struct ext4_inode_info *ei;
4740 struct inode *inode;
4741 long ret;
4742 int block;
4743
4744 inode = iget_locked(sb, ino);
4745 if (!inode)
4746 return ERR_PTR(-ENOMEM);
4747 if (!(inode->i_state & I_NEW))
4748 return inode;
4749
4750 ei = EXT4_I(inode);
4751 iloc.bh = NULL;
4752
4753 ret = __ext4_get_inode_loc(inode, &iloc, 0);
4754 if (ret < 0)
4755 goto bad_inode;
4756 raw_inode = ext4_raw_inode(&iloc);
4757 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4758 inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4759 inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4760 if (!(test_opt(inode->i_sb, NO_UID32))) {
4761 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4762 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4763 }
4764 inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
4765
4766 ei->i_state = 0;
4767 ei->i_dir_start_lookup = 0;
4768 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4769 /* We now have enough fields to check if the inode was active or not.
4770 * This is needed because nfsd might try to access dead inodes;
4771 * the test is the same one that e2fsck uses.
4772 * NeilBrown 1999oct15
4773 */
4774 if (inode->i_nlink == 0) {
4775 if (inode->i_mode == 0 ||
4776 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
4777 /* this inode is deleted */
4778 ret = -ESTALE;
4779 goto bad_inode;
4780 }
4781 /* The only unlinked inodes we let through here have
4782 * valid i_mode and are being read by the orphan
4783 * recovery code: that's fine, we're about to complete
4784 * the process of deleting those. */
4785 }
4786 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4787 inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4788 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4789 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
4790 ei->i_file_acl |=
4791 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4792 inode->i_size = ext4_isize(raw_inode);
4793 ei->i_disksize = inode->i_size;
4794 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4795 ei->i_block_group = iloc.block_group;
4796 ei->i_last_alloc_group = ~0;
4797 /*
4798 * NOTE! The in-memory inode i_data array is in little-endian order
4799 * even on big-endian machines: we do NOT byteswap the block numbers!
4800 */
4801 for (block = 0; block < EXT4_N_BLOCKS; block++)
4802 ei->i_data[block] = raw_inode->i_block[block];
4803 INIT_LIST_HEAD(&ei->i_orphan);
4804
4805 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4806 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4807 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4808 EXT4_INODE_SIZE(inode->i_sb)) {
4809 ret = -EIO;
4810 goto bad_inode;
4811 }
4812 if (ei->i_extra_isize == 0) {
4813 /* The extra space is currently unused. Use it. */
4814 ei->i_extra_isize = sizeof(struct ext4_inode) -
4815 EXT4_GOOD_OLD_INODE_SIZE;
4816 } else {
4817 __le32 *magic = (void *)raw_inode +
4818 EXT4_GOOD_OLD_INODE_SIZE +
4819 ei->i_extra_isize;
4820 if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
4821 ei->i_state |= EXT4_STATE_XATTR;
4822 }
4823 } else
4824 ei->i_extra_isize = 0;
4825
4826 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
4827 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
4828 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
4829 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
4830
4831 inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
4832 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4833 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4834 inode->i_version |=
4835 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
4836 }
4837
4838 ret = 0;
4839 if (ei->i_file_acl &&
4840 !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
4841 ext4_error(sb, __func__,
4842 "bad extended attribute block %llu in inode #%lu",
4843 ei->i_file_acl, inode->i_ino);
4844 ret = -EIO;
4845 goto bad_inode;
4846 } else if (ei->i_flags & EXT4_EXTENTS_FL) {
4847 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4848 (S_ISLNK(inode->i_mode) &&
4849 !ext4_inode_is_fast_symlink(inode)))
4850 /* Validate extent which is part of inode */
4851 ret = ext4_ext_check_inode(inode);
4852 } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4853 (S_ISLNK(inode->i_mode) &&
4854 !ext4_inode_is_fast_symlink(inode))) {
4855 /* Validate block references which are part of inode */
4856 ret = ext4_check_inode_blockref(inode);
4857 }
4858 if (ret)
4859 goto bad_inode;
4860
4861 if (S_ISREG(inode->i_mode)) {
4862 inode->i_op = &ext4_file_inode_operations;
4863 inode->i_fop = &ext4_file_operations;
4864 ext4_set_aops(inode);
4865 } else if (S_ISDIR(inode->i_mode)) {
4866 inode->i_op = &ext4_dir_inode_operations;
4867 inode->i_fop = &ext4_dir_operations;
4868 } else if (S_ISLNK(inode->i_mode)) {
4869 if (ext4_inode_is_fast_symlink(inode)) {
4870 inode->i_op = &ext4_fast_symlink_inode_operations;
4871 nd_terminate_link(ei->i_data, inode->i_size,
4872 sizeof(ei->i_data) - 1);
4873 } else {
4874 inode->i_op = &ext4_symlink_inode_operations;
4875 ext4_set_aops(inode);
4876 }
4877 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
4878 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
4879 inode->i_op = &ext4_special_inode_operations;
4880 if (raw_inode->i_block[0])
4881 init_special_inode(inode, inode->i_mode,
4882 old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
4883 else
4884 init_special_inode(inode, inode->i_mode,
4885 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
4886 } else {
4887 ret = -EIO;
4888 ext4_error(inode->i_sb, __func__,
4889 "bogus i_mode (%o) for inode=%lu",
4890 inode->i_mode, inode->i_ino);
4891 goto bad_inode;
4892 }
4893 brelse(iloc.bh);
4894 ext4_set_inode_flags(inode);
4895 unlock_new_inode(inode);
4896 return inode;
4897
4898 bad_inode:
4899 brelse(iloc.bh);
4900 iget_failed(inode);
4901 return ERR_PTR(ret);
4902 }
4903
4904 static int ext4_inode_blocks_set(handle_t *handle,
4905 struct ext4_inode *raw_inode,
4906 struct ext4_inode_info *ei)
4907 {
4908 struct inode *inode = &(ei->vfs_inode);
4909 u64 i_blocks = inode->i_blocks;
4910 struct super_block *sb = inode->i_sb;
4911
4912 if (i_blocks <= ~0U) {
4913 /*
4914 * i_blocks can be represented in a 32 bit variable
4915 * as a multiple of 512 bytes
4916 */
4917 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4918 raw_inode->i_blocks_high = 0;
4919 ei->i_flags &= ~EXT4_HUGE_FILE_FL;
4920 return 0;
4921 }
4922 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
4923 return -EFBIG;
4924
4925 if (i_blocks <= 0xffffffffffffULL) {
4926 /*
4927 * i_blocks can be represented in a 48 bit variable
4928 * as a multiple of 512 bytes
4929 */
4930 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4931 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4932 ei->i_flags &= ~EXT4_HUGE_FILE_FL;
4933 } else {
4934 ei->i_flags |= EXT4_HUGE_FILE_FL;
4935 /* i_blocks is stored in units of the file system block size */
4936 i_blocks = i_blocks >> (inode->i_blkbits - 9);
4937 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4938 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4939 }
4940 return 0;
4941 }
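/*
 * Illustrative note (not in the original source): the three cases above
 * correspond to size thresholds in 512-byte units. An i_blocks value up
 * to 0xffffffff fits in i_blocks_lo alone; up to 0xffffffffffff it is
 * split across i_blocks_lo/i_blocks_high; beyond that EXT4_HUGE_FILE_FL
 * is set and the count is first converted to filesystem-block units,
 * e.g. i_blocks >> 3 for an assumed 4KB block size.
 */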
4942
4943 /*
4944 * Post the struct inode info into an on-disk inode location in the
4945 * buffer-cache. This gobbles the caller's reference to the
4946 * buffer_head in the inode location struct.
4947 *
4948 * The caller must have write access to iloc->bh.
4949 */
4950 static int ext4_do_update_inode(handle_t *handle,
4951 struct inode *inode,
4952 struct ext4_iloc *iloc)
4953 {
4954 struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
4955 struct ext4_inode_info *ei = EXT4_I(inode);
4956 struct buffer_head *bh = iloc->bh;
4957 int err = 0, rc, block;
4958
4959 /* For fields not tracked in the in-memory inode,
4960 * initialise them to zero for new inodes. */
4961 if (ei->i_state & EXT4_STATE_NEW)
4962 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
4963
4964 ext4_get_inode_flags(ei);
4965 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4966 if (!(test_opt(inode->i_sb, NO_UID32))) {
4967 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
4968 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
4969 /*
4970 * Fix up interoperability with old kernels. Otherwise, old inodes get
4971 * re-used with the upper 16 bits of the uid/gid intact
4972 */
4973 if (!ei->i_dtime) {
4974 raw_inode->i_uid_high =
4975 cpu_to_le16(high_16_bits(inode->i_uid));
4976 raw_inode->i_gid_high =
4977 cpu_to_le16(high_16_bits(inode->i_gid));
4978 } else {
4979 raw_inode->i_uid_high = 0;
4980 raw_inode->i_gid_high = 0;
4981 }
4982 } else {
4983 raw_inode->i_uid_low =
4984 cpu_to_le16(fs_high2lowuid(inode->i_uid));
4985 raw_inode->i_gid_low =
4986 cpu_to_le16(fs_high2lowgid(inode->i_gid));
4987 raw_inode->i_uid_high = 0;
4988 raw_inode->i_gid_high = 0;
4989 }
4990 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4991
4992 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4993 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4994 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4995 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4996
4997 if (ext4_inode_blocks_set(handle, raw_inode, ei))
4998 goto out_brelse;
4999 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
5000 raw_inode->i_flags = cpu_to_le32(ei->i_flags);
5001 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
5002 cpu_to_le32(EXT4_OS_HURD))
5003 raw_inode->i_file_acl_high =
5004 cpu_to_le16(ei->i_file_acl >> 32);
5005 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
5006 ext4_isize_set(raw_inode, ei->i_disksize);
5007 if (ei->i_disksize > 0x7fffffffULL) {
5008 struct super_block *sb = inode->i_sb;
5009 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
5010 EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
5011 EXT4_SB(sb)->s_es->s_rev_level ==
5012 cpu_to_le32(EXT4_GOOD_OLD_REV)) {
5013 /* If this is the first large file
5014 * created, add a flag to the superblock.
5015 */
5016 err = ext4_journal_get_write_access(handle,
5017 EXT4_SB(sb)->s_sbh);
5018 if (err)
5019 goto out_brelse;
5020 ext4_update_dynamic_rev(sb);
5021 EXT4_SET_RO_COMPAT_FEATURE(sb,
5022 EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
5023 sb->s_dirt = 1;
5024 ext4_handle_sync(handle);
5025 err = ext4_handle_dirty_metadata(handle, inode,
5026 EXT4_SB(sb)->s_sbh);
5027 }
5028 }
5029 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
5030 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
5031 if (old_valid_dev(inode->i_rdev)) {
5032 raw_inode->i_block[0] =
5033 cpu_to_le32(old_encode_dev(inode->i_rdev));
5034 raw_inode->i_block[1] = 0;
5035 } else {
5036 raw_inode->i_block[0] = 0;
5037 raw_inode->i_block[1] =
5038 cpu_to_le32(new_encode_dev(inode->i_rdev));
5039 raw_inode->i_block[2] = 0;
5040 }
5041 } else
5042 for (block = 0; block < EXT4_N_BLOCKS; block++)
5043 raw_inode->i_block[block] = ei->i_data[block];
5044
5045 raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
5046 if (ei->i_extra_isize) {
5047 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
5048 raw_inode->i_version_hi =
5049 cpu_to_le32(inode->i_version >> 32);
5050 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
5051 }
5052
5053 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
5054 rc = ext4_handle_dirty_metadata(handle, inode, bh);
5055 if (!err)
5056 err = rc;
5057 ei->i_state &= ~EXT4_STATE_NEW;
5058
5059 out_brelse:
5060 brelse(bh);
5061 ext4_std_error(inode->i_sb, err);
5062 return err;
5063 }
5064
5065 /*
5066 * ext4_write_inode()
5067 *
5068 * We are called from a few places:
5069 *
5070 * - Within generic_file_write() for O_SYNC files.
5071 * Here, there will be no transaction running. We wait for any running
5072 * transaction to commit.
5073 *
5074 * - Within sys_sync(), kupdate and such.
5075 * We wait on commit, if told to.
5076 *
5077 * - Within prune_icache() (PF_MEMALLOC == true)
5078 * Here we simply return. We can't afford to block kswapd on the
5079 * journal commit.
5080 *
5081 * In all cases it is actually safe for us to return without doing anything,
5082 * because the inode has been copied into a raw inode buffer in
5083 * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
5084 * knfsd.
5085 *
5086 * Note that we are absolutely dependent upon all inode dirtiers doing the
5087 * right thing: they *must* call mark_inode_dirty() after dirtying info in
5088 * which we are interested.
5089 *
5090 * It would be a bug for them to not do this. The code:
5091 *
5092 * mark_inode_dirty(inode)
5093 * stuff();
5094 * inode->i_size = expr;
5095 *
5096 * is in error because a kswapd-driven write_inode() could occur while
5097 * `stuff()' is running, and the new i_size will be lost. Plus the inode
5098 * will no longer be on the superblock's dirty inode list.
5099 */
5100 int ext4_write_inode(struct inode *inode, int wait)
5101 {
5102 int err;
5103
5104 if (current->flags & PF_MEMALLOC)
5105 return 0;
5106
5107 if (EXT4_SB(inode->i_sb)->s_journal) {
5108 if (ext4_journal_current_handle()) {
5109 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
5110 dump_stack();
5111 return -EIO;
5112 }
5113
5114 if (!wait)
5115 return 0;
5116
5117 err = ext4_force_commit(inode->i_sb);
5118 } else {
5119 struct ext4_iloc iloc;
5120
5121 err = ext4_get_inode_loc(inode, &iloc);
5122 if (err)
5123 return err;
5124 if (wait)
5125 sync_dirty_buffer(iloc.bh);
5126 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
5127 ext4_error(inode->i_sb, __func__,
5128 "IO error syncing inode, "
5129 "inode=%lu, block=%llu",
5130 inode->i_ino,
5131 (unsigned long long)iloc.bh->b_blocknr);
5132 err = -EIO;
5133 }
5134 }
5135 return err;
5136 }
5137
5138 /*
5139 * ext4_setattr()
5140 *
5141 * Called from notify_change.
5142 *
5143 * We want to trap VFS attempts to truncate the file as soon as
5144 * possible. In particular, we want to make sure that when the VFS
5145 * shrinks i_size, we put the inode on the orphan list and modify
5146 * i_disksize immediately, so that during the subsequent flushing of
5147 * dirty pages and freeing of disk blocks, we can guarantee that any
5148 * commit will leave the blocks being flushed in an unused state on
5149 * disk. (On recovery, the inode will get truncated and the blocks will
5150 * be freed, so we have a strong guarantee that no future commit will
5151 * leave these blocks visible to the user.)
5152 *
5153 * Another thing we have to assure is that if we are in ordered mode
5154 * and the inode is still attached to the committing transaction, we must
5155 * start writeout of all the dirty pages which are being truncated.
5156 * This way we are sure that all the data written in the previous
5157 * transaction are already on disk (truncate waits for pages under
5158 * writeback).
5159 *
5160 * Called with inode->i_mutex down.
5161 */
5162 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
5163 {
5164 struct inode *inode = dentry->d_inode;
5165 int error, rc = 0;
5166 const unsigned int ia_valid = attr->ia_valid;
5167
5168 error = inode_change_ok(inode, attr);
5169 if (error)
5170 return error;
5171
5172 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
5173 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
5174 handle_t *handle;
5175
5176 /* (user+group)*(old+new) structure, inode write (sb,
5177 * inode block, ? - but truncate inode update has it) */
5178 handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
5179 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
5180 if (IS_ERR(handle)) {
5181 error = PTR_ERR(handle);
5182 goto err_out;
5183 }
5184 error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
5185 if (error) {
5186 ext4_journal_stop(handle);
5187 return error;
5188 }
5189 /* Update corresponding info in inode so that everything is in
5190 * one transaction */
5191 if (attr->ia_valid & ATTR_UID)
5192 inode->i_uid = attr->ia_uid;
5193 if (attr->ia_valid & ATTR_GID)
5194 inode->i_gid = attr->ia_gid;
5195 error = ext4_mark_inode_dirty(handle, inode);
5196 ext4_journal_stop(handle);
5197 }
5198
5199 if (attr->ia_valid & ATTR_SIZE) {
5200 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
5201 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5202
5203 if (attr->ia_size > sbi->s_bitmap_maxbytes) {
5204 error = -EFBIG;
5205 goto err_out;
5206 }
5207 }
5208 }
5209
5210 if (S_ISREG(inode->i_mode) &&
5211 attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
5212 handle_t *handle;
5213
5214 handle = ext4_journal_start(inode, 3);
5215 if (IS_ERR(handle)) {
5216 error = PTR_ERR(handle);
5217 goto err_out;
5218 }
5219
5220 error = ext4_orphan_add(handle, inode);
5221 EXT4_I(inode)->i_disksize = attr->ia_size;
5222 rc = ext4_mark_inode_dirty(handle, inode);
5223 if (!error)
5224 error = rc;
5225 ext4_journal_stop(handle);
5226
5227 if (ext4_should_order_data(inode)) {
5228 error = ext4_begin_ordered_truncate(inode,
5229 attr->ia_size);
5230 if (error) {
5231 /* Do as much error cleanup as possible */
5232 handle = ext4_journal_start(inode, 3);
5233 if (IS_ERR(handle)) {
5234 ext4_orphan_del(NULL, inode);
5235 goto err_out;
5236 }
5237 ext4_orphan_del(handle, inode);
5238 ext4_journal_stop(handle);
5239 goto err_out;
5240 }
5241 }
5242 }
5243
5244 rc = inode_setattr(inode, attr);
5245
5246 /* If inode_setattr's call to ext4_truncate failed to get a
5247 * transaction handle at all, we need to clean up the in-core
5248 * orphan list manually. */
5249 if (inode->i_nlink)
5250 ext4_orphan_del(NULL, inode);
5251
5252 if (!rc && (ia_valid & ATTR_MODE))
5253 rc = ext4_acl_chmod(inode);
5254
5255 err_out:
5256 ext4_std_error(inode->i_sb, error);
5257 if (!error)
5258 error = rc;
5259 return error;
5260 }
5261
5262 int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
5263 struct kstat *stat)
5264 {
5265 struct inode *inode;
5266 unsigned long delalloc_blocks;
5267
5268 inode = dentry->d_inode;
5269 generic_fillattr(inode, stat);
5270
5271 /*
5272 * We can't update i_blocks if the block allocation is delayed,
5273 * otherwise, in the case of a system crash before the real block
5274 * allocation is done, we would have i_blocks inconsistent with
5275 * the on-disk file blocks.
5276 * We always keep i_blocks updated together with the real
5277 * allocation. But to avoid confusing userspace, stat
5278 * will report blocks that include the delayed allocation
5279 * blocks for this file.
5280 */
5281 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
5282 delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
5283 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
5284
5285 stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
5286 return 0;
5287 }
5288
5289 static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
5290 int chunk)
5291 {
5292 int indirects;
5293
5294 /* if nrblocks are contiguous */
5295 if (chunk) {
5296 /*
5297 * With N contiguous data blocks, we need at most
5298 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks
5299 * 2 dindirect blocks
5300 * 1 tindirect block
5301 */
5302 indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
5303 return indirects + 3;
5304 }
5305 /*
5306 * if nrblocks are not contiguous, worst case: each block touches
5307 * an indirect block, each indirect block touches a double indirect
5308 * block, plus a triple indirect block
5309 */
5310 indirects = nrblocks * 2 + 1;
5311 return indirects;
5312 }
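
/*
 * Worked example (editorial illustration, not part of the original
 * source): with a 4KB block size, EXT4_ADDR_PER_BLOCK() is 1024, so
 * mapping a contiguous chunk of 1024 data blocks costs at most
 * 1024/1024 + 3 = 4 index blocks, while mapping just 4 discontiguous
 * blocks is charged the worst case of 4 * 2 + 1 = 9.
 */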
5313
5314 static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
5315 {
5316 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
5317 return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
5318 return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
5319 }
5320
5321 /*
5322 * Account for index blocks, block group bitmaps and block group
5323 * descriptor blocks when modifying data blocks and index blocks:
5324 * in the worst case, the index blocks spread over different block groups.
5325 *
5326 * If the data blocks are discontiguous, they may spread over
5327 * different block groups too. Even if they are contiguous, with
5328 * flexbg they could still cross a block group boundary.
5329 *
5330 * Also account for superblock, inode, quota and xattr blocks.
5331 */
5332 int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
5333 {
5334 ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
5335 int gdpblocks;
5336 int idxblocks;
5337 int ret = 0;
5338
5339 /*
5340 * How many index blocks do we need to touch to map nrblocks?
5341 * The "chunk" flag indicates whether the nrblocks are
5342 * physically contiguous on disk.
5343 *
5344 * Direct IO and fallocate call get_block to allocate a
5345 * single extent at a time, so they can set the "chunk" flag.
5346 */
5347 idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
5348
5349 ret = idxblocks;
5350
5351 /*
5352 * Now let's see how many group bitmaps and group descriptors we
5353 * need to account for
5354 */
5355 groups = idxblocks;
5356 if (chunk)
5357 groups += 1;
5358 else
5359 groups += nrblocks;
5360
5361 gdpblocks = groups;
5362 if (groups > ngroups)
5363 groups = ngroups;
5364 if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
5365 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
5366
5367 /* bitmaps and block group descriptor blocks */
5368 ret += groups + gdpblocks;
5369
5370 /* Blocks for super block, inode, quota and xattr blocks */
5371 ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
5372
5373 return ret;
5374 }
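
/*
 * Worked example (editorial illustration, assuming the non-extent
 * path): for nrblocks = 4 discontiguous blocks, idxblocks is
 * 4 * 2 + 1 = 9, so groups = 9 + 4 = 13, charging up to 13 bitmap
 * blocks plus 13 group descriptor blocks (each figure capped by the
 * actual group count and s_gdb_count respectively), on top of the
 * 9 index blocks and EXT4_META_TRANS_BLOCKS().
 */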
5375
5376 /*
5377 * Calculate the total number of credits to reserve to fit
5378 * the modification of a single page into a single transaction,
5379 * which may include multiple chunks of block allocations.
5380 *
5381 * This could be called via ext4_write_begin()
5382 *
5383 * We need to consider the worst case, when we allocate
5384 * one new block per extent.
5385 */
5386 int ext4_writepage_trans_blocks(struct inode *inode)
5387 {
5388 int bpp = ext4_journal_blocks_per_page(inode);
5389 int ret;
5390
5391 ret = ext4_meta_trans_blocks(inode, bpp, 0);
5392
5393 /* Account for data blocks for journalled mode */
5394 if (ext4_should_journal_data(inode))
5395 ret += bpp;
5396 return ret;
5397 }
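
/*
 * Typical calling pattern (an editorial sketch, not code from this
 * file): a buffered-write path reserves the worst-case credits for
 * one page up front, e.g.
 *
 *	handle = ext4_journal_start(inode,
 *				    ext4_writepage_trans_blocks(inode));
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...modify the page's blocks under this handle...
 *	ext4_journal_stop(handle);
 */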
5398
5399 /*
5400 * Calculate the journal credits for a chunk of data modification.
5401 *
5402 * This is called from DIO, fallocate, or whoever else calls
5403 * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks.
5404 *
5405 * Journal buffers for data blocks are not included here, as DIO
5406 * and fallocate do not need to journal data buffers.
5407 */
5408 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5409 {
5410 return ext4_meta_trans_blocks(inode, nrblocks, 1);
5411 }
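
/*
 * Example (editorial sketch): a caller allocating `max_blocks'
 * contiguous blocks would reserve credits roughly as
 *
 *	credits = ext4_chunk_trans_blocks(inode, max_blocks);
 *	handle = ext4_journal_start(inode, credits);
 *
 * before calling ext4_get_blocks() with a create flag set.
 */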
5412
5413 /*
5414 * The caller must have previously called ext4_reserve_inode_write().
5415 * Given this, we know that the caller already has write access to iloc->bh.
5416 */
5417 int ext4_mark_iloc_dirty(handle_t *handle,
5418 struct inode *inode, struct ext4_iloc *iloc)
5419 {
5420 int err = 0;
5421
5422 if (test_opt(inode->i_sb, I_VERSION))
5423 inode_inc_iversion(inode);
5424
5425 /* ext4_do_update_inode() consumes one bh reference (bh->b_count) */
5426 get_bh(iloc->bh);
5427
5428 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
5429 err = ext4_do_update_inode(handle, inode, iloc);
5430 put_bh(iloc->bh);
5431 return err;
5432 }
5433
5434 /*
5435 * On success, we end up with an outstanding reference count against
5436 * iloc->bh. This _must_ be cleaned up later.
5437 */
5438
5439 int
5440 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5441 struct ext4_iloc *iloc)
5442 {
5443 int err;
5444
5445 err = ext4_get_inode_loc(inode, iloc);
5446 if (!err) {
5447 BUFFER_TRACE(iloc->bh, "get_write_access");
5448 err = ext4_journal_get_write_access(handle, iloc->bh);
5449 if (err) {
5450 brelse(iloc->bh);
5451 iloc->bh = NULL;
5452 }
5453 }
5454 ext4_std_error(inode->i_sb, err);
5455 return err;
5456 }
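
/*
 * The canonical pairing (an editorial sketch; ext4_mark_inode_dirty()
 * below is the real in-tree user):
 *
 *	struct ext4_iloc iloc;
 *
 *	err = ext4_reserve_inode_write(handle, inode, &iloc);
 *	if (!err) {
 *		...update the in-core inode...
 *		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 *	}
 *
 * ext4_mark_iloc_dirty() releases the bh reference taken here, so the
 * caller must not brelse() iloc.bh again on this path.
 */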
5457
5458 /*
5459 * Expand an inode by new_extra_isize bytes.
5460 * Returns 0 on success or negative error number on failure.
5461 */
5462 static int ext4_expand_extra_isize(struct inode *inode,
5463 unsigned int new_extra_isize,
5464 struct ext4_iloc iloc,
5465 handle_t *handle)
5466 {
5467 struct ext4_inode *raw_inode;
5468 struct ext4_xattr_ibody_header *header;
5469 struct ext4_xattr_entry *entry;
5470
5471 if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
5472 return 0;
5473
5474 raw_inode = ext4_raw_inode(&iloc);
5475
5476 header = IHDR(inode, raw_inode);
5477 entry = IFIRST(header);
5478
5479 /* No extended attributes present */
5480 if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
5481 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5482 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
5483 new_extra_isize);
5484 EXT4_I(inode)->i_extra_isize = new_extra_isize;
5485 return 0;
5486 }
5487
5488 /* try to expand with EAs present */
5489 return ext4_expand_extra_isize_ea(inode, new_extra_isize,
5490 raw_inode, handle);
5491 }
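
/*
 * Layout reminder (editorial note): the on-disk inode starts with the
 * original 128-byte (EXT4_GOOD_OLD_INODE_SIZE) structure, followed by
 * i_extra_isize bytes of newer fixed fields, followed by any in-inode
 * extended attributes. Expanding i_extra_isize therefore has to shift
 * those EAs out of the way, which ext4_expand_extra_isize_ea() does.
 */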
5492
5493 /*
5494 * What we do here is to mark the in-core inode as clean with respect to inode
5495 * dirtiness (it may still be data-dirty).
5496 * This means that the in-core inode may be reaped by prune_icache
5497 * without having to perform any I/O. This is a very good thing,
5498 * because *any* task may call prune_icache - even ones which
5499 * have a transaction open against a different journal.
5500 *
5501 * Is this cheating? Not really. Sure, we haven't written the
5502 * inode out, but prune_icache isn't a user-visible syncing function.
5503 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
5504 * we start and wait on commits.
5505 *
5506 * Is this efficient/effective? Well, we're being nice to the system
5507 * by cleaning up our inodes proactively so they can be reaped
5508 * without I/O. But we are potentially leaving up to five seconds'
5509 * worth of inodes floating about which prune_icache wants us to
5510 * write out. One way to fix that would be to get prune_icache()
5511 * to do a write_super() to free up some memory, which has the
5512 * desired effect.
5513 */
5514 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
5515 {
5516 struct ext4_iloc iloc;
5517 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5518 static unsigned int mnt_count;
5519 int err, ret;
5520
5521 might_sleep();
5522 err = ext4_reserve_inode_write(handle, inode, &iloc);
5523 if (ext4_handle_valid(handle) &&
5524 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
5525 !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
5526 /*
5527 * We need extra buffer credits since we may write into EA block
5528 * with this same handle. If journal_extend fails, then it will
5529 * only result in a minor loss of functionality for that inode.
5530 * If this is felt to be critical, then e2fsck should be run to
5531 * force a large enough s_min_extra_isize.
5532 */
5533 if ((jbd2_journal_extend(handle,
5534 EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
5535 ret = ext4_expand_extra_isize(inode,
5536 sbi->s_want_extra_isize,
5537 iloc, handle);
5538 if (ret) {
5539 EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
5540 if (mnt_count !=
5541 le16_to_cpu(sbi->s_es->s_mnt_count)) {
5542 ext4_warning(inode->i_sb, __func__,
5543 "Unable to expand inode %lu. Delete"
5544 " some EAs or run e2fsck.",
5545 inode->i_ino);
5546 mnt_count =
5547 le16_to_cpu(sbi->s_es->s_mnt_count);
5548 }
5549 }
5550 }
5551 }
5552 if (!err)
5553 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
5554 return err;
5555 }
5556
5557 /*
5558 * ext4_dirty_inode() is called from __mark_inode_dirty()
5559 *
5560 * We're really interested in the case where a file is being extended.
5561 * i_size has been changed by generic_commit_write() and we thus need
5562 * to include the updated inode in the current transaction.
5563 *
5564 * Also, vfs_dq_alloc_block() will always dirty the inode when blocks
5565 * are allocated to the file.
5566 *
5567 * If the inode is marked synchronous, we don't honour that here - doing
5568 * so would cause a commit on atime updates, which we don't bother doing.
5569 * We handle synchronous inodes at the highest possible level.
5570 */
5571 void ext4_dirty_inode(struct inode *inode)
5572 {
5573 handle_t *handle;
5574
5575 handle = ext4_journal_start(inode, 2);
5576 if (IS_ERR(handle))
5577 goto out;
5578
5579 ext4_mark_inode_dirty(handle, inode);
5580
5581 ext4_journal_stop(handle);
5582 out:
5583 return;
5584 }
5585
5586 #if 0
5587 /*
5588 * Bind an inode's backing buffer_head into this transaction, to prevent
5589 * it from being flushed to disk early. Unlike
5590 * ext4_reserve_inode_write, this leaves behind no bh reference and
5591 * returns no iloc structure, so the caller needs to repeat the iloc
5592 * lookup to mark the inode dirty later.
5593 */
5594 static int ext4_pin_inode(handle_t *handle, struct inode *inode)
5595 {
5596 struct ext4_iloc iloc;
5597
5598 int err = 0;
5599 if (handle) {
5600 err = ext4_get_inode_loc(inode, &iloc);
5601 if (!err) {
5602 BUFFER_TRACE(iloc.bh, "get_write_access");
5603 err = jbd2_journal_get_write_access(handle, iloc.bh);
5604 if (!err)
5605 err = ext4_handle_dirty_metadata(handle,
5606 inode,
5607 iloc.bh);
5608 brelse(iloc.bh);
5609 }
5610 }
5611 ext4_std_error(inode->i_sb, err);
5612 return err;
5613 }
5614 #endif
5615
5616 int ext4_change_inode_journal_flag(struct inode *inode, int val)
5617 {
5618 journal_t *journal;
5619 handle_t *handle;
5620 int err;
5621
5622 /*
5623 * We have to be very careful here: changing a data block's
5624 * journaling status dynamically is dangerous. If we write a
5625 * data block to the journal, change the status and then delete
5626 * that block, we risk forgetting to revoke the old log record
5627 * from the journal and so a subsequent replay can corrupt data.
5628 * So, first we make sure that the journal is empty and that
5629 * nobody is changing anything.
5630 */
5631
5632 journal = EXT4_JOURNAL(inode);
5633 if (!journal)
5634 return 0;
5635 if (is_journal_aborted(journal))
5636 return -EROFS;
5637
5638 jbd2_journal_lock_updates(journal);
5639 jbd2_journal_flush(journal);
5640
5641 /*
5642 * OK, there are no updates running now, and all cached data is
5643 * synced to disk. We are now in a completely consistent state
5644 * which doesn't have anything in the journal, and we know that
5645 * no filesystem updates are running, so it is safe to modify
5646 * the inode's in-core data-journaling state flag now.
5647 */
5648
5649 if (val)
5650 EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
5651 else
5652 EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
5653 ext4_set_aops(inode);
5654
5655 jbd2_journal_unlock_updates(journal);
5656
5657 /* Finally we can mark the inode as dirty. */
5658
5659 handle = ext4_journal_start(inode, 1);
5660 if (IS_ERR(handle))
5661 return PTR_ERR(handle);
5662
5663 err = ext4_mark_inode_dirty(handle, inode);
5664 ext4_handle_sync(handle);
5665 ext4_journal_stop(handle);
5666 ext4_std_error(inode->i_sb, err);
5667
5668 return err;
5669 }
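
/*
 * Example caller (an editorial sketch): the EXT4_IOC_SETFLAGS ioctl
 * path toggles per-inode data journalling roughly along the lines of
 *
 *	err = ext4_change_inode_journal_flag(inode,
 *			(flags & EXT4_JOURNAL_DATA_FL) != 0);
 *
 * where `flags' are the new inode flags requested by userspace.
 */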
5670
5671 static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
5672 {
5673 return !buffer_mapped(bh);
5674 }
5675
5676 int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5677 {
5678 struct page *page = vmf->page;
5679 loff_t size;
5680 unsigned long len;
5681 int ret = -EINVAL;
5682 void *fsdata;
5683 struct file *file = vma->vm_file;
5684 struct inode *inode = file->f_path.dentry->d_inode;
5685 struct address_space *mapping = inode->i_mapping;
5686
5687 /*
5688 * Get i_alloc_sem to stop truncates messing with the inode. We cannot
5689 * get i_mutex because we are already holding mmap_sem.
5690 */
5691 down_read(&inode->i_alloc_sem);
5692 size = i_size_read(inode);
5693 if (page->mapping != mapping || size <= page_offset(page)
5694 || !PageUptodate(page)) {
5695 /* page got truncated from under us? */
5696 goto out_unlock;
5697 }
5698 ret = 0;
5699 if (PageMappedToDisk(page))
5700 goto out_unlock;
5701
5702 if (page->index == size >> PAGE_CACHE_SHIFT)
5703 len = size & ~PAGE_CACHE_MASK;
5704 else
5705 len = PAGE_CACHE_SIZE;
5706
5707 lock_page(page);
5708 /*
5709 * Return if we have all the buffers mapped. This avoids
5710 * the need to call write_begin/write_end, which does a
5711 * journal_start/journal_stop that can block and take a
5712 * long time.
5713 */
5714 if (page_has_buffers(page)) {
5715 if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
5716 ext4_bh_unmapped)) {
5717 unlock_page(page);
5718 goto out_unlock;
5719 }
5720 }
5721 unlock_page(page);
5722 /*
5723 * OK, we need to fill the hole... Do write_begin/write_end
5724 * to do the block allocation/reservation. We are not holding
5725 * inode->i_mutex here, which allows parallel write_begin and
5726 * write_end calls; lock_page prevents this from happening
5727 * on the same page, though.
5728 */
5729 ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
5730 len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
5731 if (ret < 0)
5732 goto out_unlock;
5733 ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
5734 len, len, page, fsdata);
5735 if (ret < 0)
5736 goto out_unlock;
5737 ret = 0;
5738 out_unlock:
5739 if (ret)
5740 ret = VM_FAULT_SIGBUS;
5741 up_read(&inode->i_alloc_sem);
5742 return ret;
5743 }