ext4: Patch up how we claim metadata blocks for quota purposes
[deliverable/linux.git] / fs / ext4 / inode.c
1 /*
2 * linux/fs/ext4/inode.c
3 *
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
8 *
9 * from
10 *
11 * linux/fs/minix/inode.c
12 *
13 * Copyright (C) 1991, 1992 Linus Torvalds
14 *
15 * Goal-directed block allocation by Stephen Tweedie
16 * (sct@redhat.com), 1993, 1998
17 * Big-endian to little-endian byte-swapping/bitmaps by
18 * David S. Miller (davem@caip.rutgers.edu), 1995
19 * 64-bit file support on 64-bit platforms by Jakub Jelinek
20 * (jj@sunsite.ms.mff.cuni.cz)
21 *
22 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
23 */
24
25 #include <linux/module.h>
26 #include <linux/fs.h>
27 #include <linux/time.h>
28 #include <linux/jbd2.h>
29 #include <linux/highuid.h>
30 #include <linux/pagemap.h>
31 #include <linux/quotaops.h>
32 #include <linux/string.h>
33 #include <linux/buffer_head.h>
34 #include <linux/writeback.h>
35 #include <linux/pagevec.h>
36 #include <linux/mpage.h>
37 #include <linux/namei.h>
38 #include <linux/uio.h>
39 #include <linux/bio.h>
40 #include <linux/workqueue.h>
41
42 #include "ext4_jbd2.h"
43 #include "xattr.h"
44 #include "acl.h"
45 #include "ext4_extents.h"
46
47 #include <trace/events/ext4.h>
48
49 #define MPAGE_DA_EXTENT_TAIL 0x01
50
51 static inline int ext4_begin_ordered_truncate(struct inode *inode,
52 loff_t new_size)
53 {
54 return jbd2_journal_begin_ordered_truncate(
55 EXT4_SB(inode->i_sb)->s_journal,
56 &EXT4_I(inode)->jinode,
57 new_size);
58 }
59
60 static void ext4_invalidatepage(struct page *page, unsigned long offset);
61
62 /*
63 * Test whether an inode is a fast symlink.
64 */
65 static int ext4_inode_is_fast_symlink(struct inode *inode)
66 {
67 int ea_blocks = EXT4_I(inode)->i_file_acl ?
68 (inode->i_sb->s_blocksize >> 9) : 0;
69
70 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
71 }
72
73 /*
74 * Work out how many blocks we need to proceed with the next chunk of a
75 * truncate transaction.
76 */
77 static unsigned long blocks_for_truncate(struct inode *inode)
78 {
79 ext4_lblk_t needed;
80
81 needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
82
83 /* Give ourselves just enough room to cope with inodes in which
84 * i_blocks is corrupt: we've seen disk corruptions in the past
85 * which resulted in random data in an inode which looked enough
86 * like a regular file for ext4 to try to delete it. Things
87 * will go a bit crazy if that happens, but at least we should
88 * try not to panic the whole kernel. */
89 if (needed < 2)
90 needed = 2;
91
92 /* But we need to bound the transaction so we don't overflow the
93 * journal. */
94 if (needed > EXT4_MAX_TRANS_DATA)
95 needed = EXT4_MAX_TRANS_DATA;
96
97 return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
98 }
99
100 /*
101 * Truncate transactions can be complex and absolutely huge. So we need to
102 * be able to restart the transaction at a conventient checkpoint to make
103 * sure we don't overflow the journal.
104 *
105 * start_transaction gets us a new handle for a truncate transaction,
106 * and extend_transaction tries to extend the existing one a bit. If
107 * extend fails, we need to propagate the failure up and restart the
108 * transaction in the top-level truncate loop. --sct
109 */
110 static handle_t *start_transaction(struct inode *inode)
111 {
112 handle_t *result;
113
114 result = ext4_journal_start(inode, blocks_for_truncate(inode));
115 if (!IS_ERR(result))
116 return result;
117
118 ext4_std_error(inode->i_sb, PTR_ERR(result));
119 return result;
120 }
121
122 /*
123 * Try to extend this transaction for the purposes of truncation.
124 *
125 * Returns 0 if we managed to create more room. If we can't create more
126 * room, and the transaction must be restarted we return 1.
127 */
128 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
129 {
130 if (!ext4_handle_valid(handle))
131 return 0;
132 if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
133 return 0;
134 if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
135 return 0;
136 return 1;
137 }
138
139 /*
140 * Restart the transaction associated with *handle. This does a commit,
141 * so before we call here everything must be consistently dirtied against
142 * this transaction.
143 */
144 int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
145 int nblocks)
146 {
147 int ret;
148
149 /*
150 * Drop i_data_sem to avoid deadlock with ext4_get_blocks At this
151 * moment, get_block can be called only for blocks inside i_size since
152 * page cache has been already dropped and writes are blocked by
153 * i_mutex. So we can safely drop the i_data_sem here.
154 */
155 BUG_ON(EXT4_JOURNAL(inode) == NULL);
156 jbd_debug(2, "restarting handle %p\n", handle);
157 up_write(&EXT4_I(inode)->i_data_sem);
158 ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
159 down_write(&EXT4_I(inode)->i_data_sem);
160 ext4_discard_preallocations(inode);
161
162 return ret;
163 }
164
165 /*
166 * Called at the last iput() if i_nlink is zero.
167 */
168 void ext4_delete_inode(struct inode *inode)
169 {
170 handle_t *handle;
171 int err;
172
173 if (ext4_should_order_data(inode))
174 ext4_begin_ordered_truncate(inode, 0);
175 truncate_inode_pages(&inode->i_data, 0);
176
177 if (is_bad_inode(inode))
178 goto no_delete;
179
180 handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
181 if (IS_ERR(handle)) {
182 ext4_std_error(inode->i_sb, PTR_ERR(handle));
183 /*
184 * If we're going to skip the normal cleanup, we still need to
185 * make sure that the in-core orphan linked list is properly
186 * cleaned up.
187 */
188 ext4_orphan_del(NULL, inode);
189 goto no_delete;
190 }
191
192 if (IS_SYNC(inode))
193 ext4_handle_sync(handle);
194 inode->i_size = 0;
195 err = ext4_mark_inode_dirty(handle, inode);
196 if (err) {
197 ext4_warning(inode->i_sb, __func__,
198 "couldn't mark inode dirty (err %d)", err);
199 goto stop_handle;
200 }
201 if (inode->i_blocks)
202 ext4_truncate(inode);
203
204 /*
205 * ext4_ext_truncate() doesn't reserve any slop when it
206 * restarts journal transactions; therefore there may not be
207 * enough credits left in the handle to remove the inode from
208 * the orphan list and set the dtime field.
209 */
210 if (!ext4_handle_has_enough_credits(handle, 3)) {
211 err = ext4_journal_extend(handle, 3);
212 if (err > 0)
213 err = ext4_journal_restart(handle, 3);
214 if (err != 0) {
215 ext4_warning(inode->i_sb, __func__,
216 "couldn't extend journal (err %d)", err);
217 stop_handle:
218 ext4_journal_stop(handle);
219 goto no_delete;
220 }
221 }
222
223 /*
224 * Kill off the orphan record which ext4_truncate created.
225 * AKPM: I think this can be inside the above `if'.
226 * Note that ext4_orphan_del() has to be able to cope with the
227 * deletion of a non-existent orphan - this is because we don't
228 * know if ext4_truncate() actually created an orphan record.
229 * (Well, we could do this if we need to, but heck - it works)
230 */
231 ext4_orphan_del(handle, inode);
232 EXT4_I(inode)->i_dtime = get_seconds();
233
234 /*
235 * One subtle ordering requirement: if anything has gone wrong
236 * (transaction abort, IO errors, whatever), then we can still
237 * do these next steps (the fs will already have been marked as
238 * having errors), but we can't free the inode if the mark_dirty
239 * fails.
240 */
241 if (ext4_mark_inode_dirty(handle, inode))
242 /* If that failed, just do the required in-core inode clear. */
243 clear_inode(inode);
244 else
245 ext4_free_inode(handle, inode);
246 ext4_journal_stop(handle);
247 return;
248 no_delete:
249 clear_inode(inode); /* We must guarantee clearing of inode... */
250 }
251
252 typedef struct {
253 __le32 *p;
254 __le32 key;
255 struct buffer_head *bh;
256 } Indirect;
257
258 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
259 {
260 p->key = *(p->p = v);
261 p->bh = bh;
262 }
263
264 /**
265 * ext4_block_to_path - parse the block number into array of offsets
266 * @inode: inode in question (we are only interested in its superblock)
267 * @i_block: block number to be parsed
268 * @offsets: array to store the offsets in
269 * @boundary: set this non-zero if the referred-to block is likely to be
270 * followed (on disk) by an indirect block.
271 *
272 * To store the locations of file's data ext4 uses a data structure common
273 * for UNIX filesystems - tree of pointers anchored in the inode, with
274 * data blocks at leaves and indirect blocks in intermediate nodes.
275 * This function translates the block number into path in that tree -
276 * return value is the path length and @offsets[n] is the offset of
277 * pointer to (n+1)th node in the nth one. If @block is out of range
278 * (negative or too large) warning is printed and zero returned.
279 *
280 * Note: function doesn't find node addresses, so no IO is needed. All
281 * we need to know is the capacity of indirect blocks (taken from the
282 * inode->i_sb).
283 */
284
285 /*
286 * Portability note: the last comparison (check that we fit into triple
287 * indirect block) is spelled differently, because otherwise on an
288 * architecture with 32-bit longs and 8Kb pages we might get into trouble
289 * if our filesystem had 8Kb blocks. We might use long long, but that would
290 * kill us on x86. Oh, well, at least the sign propagation does not matter -
291 * i_block would have to be negative in the very beginning, so we would not
292 * get there at all.
293 */
294
295 static int ext4_block_to_path(struct inode *inode,
296 ext4_lblk_t i_block,
297 ext4_lblk_t offsets[4], int *boundary)
298 {
299 int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
300 int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
301 const long direct_blocks = EXT4_NDIR_BLOCKS,
302 indirect_blocks = ptrs,
303 double_blocks = (1 << (ptrs_bits * 2));
304 int n = 0;
305 int final = 0;
306
307 if (i_block < direct_blocks) {
308 offsets[n++] = i_block;
309 final = direct_blocks;
310 } else if ((i_block -= direct_blocks) < indirect_blocks) {
311 offsets[n++] = EXT4_IND_BLOCK;
312 offsets[n++] = i_block;
313 final = ptrs;
314 } else if ((i_block -= indirect_blocks) < double_blocks) {
315 offsets[n++] = EXT4_DIND_BLOCK;
316 offsets[n++] = i_block >> ptrs_bits;
317 offsets[n++] = i_block & (ptrs - 1);
318 final = ptrs;
319 } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
320 offsets[n++] = EXT4_TIND_BLOCK;
321 offsets[n++] = i_block >> (ptrs_bits * 2);
322 offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
323 offsets[n++] = i_block & (ptrs - 1);
324 final = ptrs;
325 } else {
326 ext4_warning(inode->i_sb, "ext4_block_to_path",
327 "block %lu > max in inode %lu",
328 i_block + direct_blocks +
329 indirect_blocks + double_blocks, inode->i_ino);
330 }
331 if (boundary)
332 *boundary = final - 1 - (i_block & (ptrs - 1));
333 return n;
334 }
335
336 static int __ext4_check_blockref(const char *function, struct inode *inode,
337 __le32 *p, unsigned int max)
338 {
339 __le32 *bref = p;
340 unsigned int blk;
341
342 while (bref < p+max) {
343 blk = le32_to_cpu(*bref++);
344 if (blk &&
345 unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
346 blk, 1))) {
347 ext4_error(inode->i_sb, function,
348 "invalid block reference %u "
349 "in inode #%lu", blk, inode->i_ino);
350 return -EIO;
351 }
352 }
353 return 0;
354 }
355
356
357 #define ext4_check_indirect_blockref(inode, bh) \
358 __ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data, \
359 EXT4_ADDR_PER_BLOCK((inode)->i_sb))
360
361 #define ext4_check_inode_blockref(inode) \
362 __ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data, \
363 EXT4_NDIR_BLOCKS)
364
365 /**
366 * ext4_get_branch - read the chain of indirect blocks leading to data
367 * @inode: inode in question
368 * @depth: depth of the chain (1 - direct pointer, etc.)
369 * @offsets: offsets of pointers in inode/indirect blocks
370 * @chain: place to store the result
371 * @err: here we store the error value
372 *
373 * Function fills the array of triples <key, p, bh> and returns %NULL
374 * if everything went OK or the pointer to the last filled triple
375 * (incomplete one) otherwise. Upon the return chain[i].key contains
376 * the number of (i+1)-th block in the chain (as it is stored in memory,
377 * i.e. little-endian 32-bit), chain[i].p contains the address of that
378 * number (it points into struct inode for i==0 and into the bh->b_data
379 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
380 * block for i>0 and NULL for i==0. In other words, it holds the block
381 * numbers of the chain, addresses they were taken from (and where we can
382 * verify that chain did not change) and buffer_heads hosting these
383 * numbers.
384 *
385 * Function stops when it stumbles upon zero pointer (absent block)
386 * (pointer to last triple returned, *@err == 0)
387 * or when it gets an IO error reading an indirect block
388 * (ditto, *@err == -EIO)
389 * or when it reads all @depth-1 indirect blocks successfully and finds
390 * the whole chain, all way to the data (returns %NULL, *err == 0).
391 *
392 * Need to be called with
393 * down_read(&EXT4_I(inode)->i_data_sem)
394 */
395 static Indirect *ext4_get_branch(struct inode *inode, int depth,
396 ext4_lblk_t *offsets,
397 Indirect chain[4], int *err)
398 {
399 struct super_block *sb = inode->i_sb;
400 Indirect *p = chain;
401 struct buffer_head *bh;
402
403 *err = 0;
404 /* i_data is not going away, no lock needed */
405 add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
406 if (!p->key)
407 goto no_block;
408 while (--depth) {
409 bh = sb_getblk(sb, le32_to_cpu(p->key));
410 if (unlikely(!bh))
411 goto failure;
412
413 if (!bh_uptodate_or_lock(bh)) {
414 if (bh_submit_read(bh) < 0) {
415 put_bh(bh);
416 goto failure;
417 }
418 /* validate block references */
419 if (ext4_check_indirect_blockref(inode, bh)) {
420 put_bh(bh);
421 goto failure;
422 }
423 }
424
425 add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
426 /* Reader: end */
427 if (!p->key)
428 goto no_block;
429 }
430 return NULL;
431
432 failure:
433 *err = -EIO;
434 no_block:
435 return p;
436 }
437
438 /**
439 * ext4_find_near - find a place for allocation with sufficient locality
440 * @inode: owner
441 * @ind: descriptor of indirect block.
442 *
443 * This function returns the preferred place for block allocation.
444 * It is used when heuristic for sequential allocation fails.
445 * Rules are:
446 * + if there is a block to the left of our position - allocate near it.
447 * + if pointer will live in indirect block - allocate near that block.
448 * + if pointer will live in inode - allocate in the same
449 * cylinder group.
450 *
451 * In the latter case we colour the starting block by the callers PID to
452 * prevent it from clashing with concurrent allocations for a different inode
453 * in the same block group. The PID is used here so that functionally related
454 * files will be close-by on-disk.
455 *
456 * Caller must make sure that @ind is valid and will stay that way.
457 */
458 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
459 {
460 struct ext4_inode_info *ei = EXT4_I(inode);
461 __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
462 __le32 *p;
463 ext4_fsblk_t bg_start;
464 ext4_fsblk_t last_block;
465 ext4_grpblk_t colour;
466 ext4_group_t block_group;
467 int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
468
469 /* Try to find previous block */
470 for (p = ind->p - 1; p >= start; p--) {
471 if (*p)
472 return le32_to_cpu(*p);
473 }
474
475 /* No such thing, so let's try location of indirect block */
476 if (ind->bh)
477 return ind->bh->b_blocknr;
478
479 /*
480 * It is going to be referred to from the inode itself? OK, just put it
481 * into the same cylinder group then.
482 */
483 block_group = ei->i_block_group;
484 if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
485 block_group &= ~(flex_size-1);
486 if (S_ISREG(inode->i_mode))
487 block_group++;
488 }
489 bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
490 last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
491
492 /*
493 * If we are doing delayed allocation, we don't need take
494 * colour into account.
495 */
496 if (test_opt(inode->i_sb, DELALLOC))
497 return bg_start;
498
499 if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
500 colour = (current->pid % 16) *
501 (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
502 else
503 colour = (current->pid % 16) * ((last_block - bg_start) / 16);
504 return bg_start + colour;
505 }
506
507 /**
508 * ext4_find_goal - find a preferred place for allocation.
509 * @inode: owner
510 * @block: block we want
511 * @partial: pointer to the last triple within a chain
512 *
513 * Normally this function find the preferred place for block allocation,
514 * returns it.
515 * Because this is only used for non-extent files, we limit the block nr
516 * to 32 bits.
517 */
518 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
519 Indirect *partial)
520 {
521 ext4_fsblk_t goal;
522
523 /*
524 * XXX need to get goal block from mballoc's data structures
525 */
526
527 goal = ext4_find_near(inode, partial);
528 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
529 return goal;
530 }
531
532 /**
533 * ext4_blks_to_allocate: Look up the block map and count the number
534 * of direct blocks need to be allocated for the given branch.
535 *
536 * @branch: chain of indirect blocks
537 * @k: number of blocks need for indirect blocks
538 * @blks: number of data blocks to be mapped.
539 * @blocks_to_boundary: the offset in the indirect block
540 *
541 * return the total number of blocks to be allocate, including the
542 * direct and indirect blocks.
543 */
544 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
545 int blocks_to_boundary)
546 {
547 unsigned int count = 0;
548
549 /*
550 * Simple case, [t,d]Indirect block(s) has not allocated yet
551 * then it's clear blocks on that path have not allocated
552 */
553 if (k > 0) {
554 /* right now we don't handle cross boundary allocation */
555 if (blks < blocks_to_boundary + 1)
556 count += blks;
557 else
558 count += blocks_to_boundary + 1;
559 return count;
560 }
561
562 count++;
563 while (count < blks && count <= blocks_to_boundary &&
564 le32_to_cpu(*(branch[0].p + count)) == 0) {
565 count++;
566 }
567 return count;
568 }
569
570 /**
571 * ext4_alloc_blocks: multiple allocate blocks needed for a branch
572 * @indirect_blks: the number of blocks need to allocate for indirect
573 * blocks
574 *
575 * @new_blocks: on return it will store the new block numbers for
576 * the indirect blocks(if needed) and the first direct block,
577 * @blks: on return it will store the total number of allocated
578 * direct blocks
579 */
580 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
581 ext4_lblk_t iblock, ext4_fsblk_t goal,
582 int indirect_blks, int blks,
583 ext4_fsblk_t new_blocks[4], int *err)
584 {
585 struct ext4_allocation_request ar;
586 int target, i;
587 unsigned long count = 0, blk_allocated = 0;
588 int index = 0;
589 ext4_fsblk_t current_block = 0;
590 int ret = 0;
591
592 /*
593 * Here we try to allocate the requested multiple blocks at once,
594 * on a best-effort basis.
595 * To build a branch, we should allocate blocks for
596 * the indirect blocks(if not allocated yet), and at least
597 * the first direct block of this branch. That's the
598 * minimum number of blocks need to allocate(required)
599 */
600 /* first we try to allocate the indirect blocks */
601 target = indirect_blks;
602 while (target > 0) {
603 count = target;
604 /* allocating blocks for indirect blocks and direct blocks */
605 current_block = ext4_new_meta_blocks(handle, inode,
606 goal, &count, err);
607 if (*err)
608 goto failed_out;
609
610 BUG_ON(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS);
611
612 target -= count;
613 /* allocate blocks for indirect blocks */
614 while (index < indirect_blks && count) {
615 new_blocks[index++] = current_block++;
616 count--;
617 }
618 if (count > 0) {
619 /*
620 * save the new block number
621 * for the first direct block
622 */
623 new_blocks[index] = current_block;
624 printk(KERN_INFO "%s returned more blocks than "
625 "requested\n", __func__);
626 WARN_ON(1);
627 break;
628 }
629 }
630
631 target = blks - count ;
632 blk_allocated = count;
633 if (!target)
634 goto allocated;
635 /* Now allocate data blocks */
636 memset(&ar, 0, sizeof(ar));
637 ar.inode = inode;
638 ar.goal = goal;
639 ar.len = target;
640 ar.logical = iblock;
641 if (S_ISREG(inode->i_mode))
642 /* enable in-core preallocation only for regular files */
643 ar.flags = EXT4_MB_HINT_DATA;
644
645 current_block = ext4_mb_new_blocks(handle, &ar, err);
646 BUG_ON(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS);
647
648 if (*err && (target == blks)) {
649 /*
650 * if the allocation failed and we didn't allocate
651 * any blocks before
652 */
653 goto failed_out;
654 }
655 if (!*err) {
656 if (target == blks) {
657 /*
658 * save the new block number
659 * for the first direct block
660 */
661 new_blocks[index] = current_block;
662 }
663 blk_allocated += ar.len;
664 }
665 allocated:
666 /* total number of blocks allocated for direct blocks */
667 ret = blk_allocated;
668 *err = 0;
669 return ret;
670 failed_out:
671 for (i = 0; i < index; i++)
672 ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
673 return ret;
674 }
675
676 /**
677 * ext4_alloc_branch - allocate and set up a chain of blocks.
678 * @inode: owner
679 * @indirect_blks: number of allocated indirect blocks
680 * @blks: number of allocated direct blocks
681 * @offsets: offsets (in the blocks) to store the pointers to next.
682 * @branch: place to store the chain in.
683 *
684 * This function allocates blocks, zeroes out all but the last one,
685 * links them into chain and (if we are synchronous) writes them to disk.
686 * In other words, it prepares a branch that can be spliced onto the
687 * inode. It stores the information about that chain in the branch[], in
688 * the same format as ext4_get_branch() would do. We are calling it after
689 * we had read the existing part of chain and partial points to the last
690 * triple of that (one with zero ->key). Upon the exit we have the same
691 * picture as after the successful ext4_get_block(), except that in one
692 * place chain is disconnected - *branch->p is still zero (we did not
693 * set the last link), but branch->key contains the number that should
694 * be placed into *branch->p to fill that gap.
695 *
696 * If allocation fails we free all blocks we've allocated (and forget
697 * their buffer_heads) and return the error value the from failed
698 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
699 * as described above and return 0.
700 */
701 static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
702 ext4_lblk_t iblock, int indirect_blks,
703 int *blks, ext4_fsblk_t goal,
704 ext4_lblk_t *offsets, Indirect *branch)
705 {
706 int blocksize = inode->i_sb->s_blocksize;
707 int i, n = 0;
708 int err = 0;
709 struct buffer_head *bh;
710 int num;
711 ext4_fsblk_t new_blocks[4];
712 ext4_fsblk_t current_block;
713
714 num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
715 *blks, new_blocks, &err);
716 if (err)
717 return err;
718
719 branch[0].key = cpu_to_le32(new_blocks[0]);
720 /*
721 * metadata blocks and data blocks are allocated.
722 */
723 for (n = 1; n <= indirect_blks; n++) {
724 /*
725 * Get buffer_head for parent block, zero it out
726 * and set the pointer to new one, then send
727 * parent to disk.
728 */
729 bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
730 branch[n].bh = bh;
731 lock_buffer(bh);
732 BUFFER_TRACE(bh, "call get_create_access");
733 err = ext4_journal_get_create_access(handle, bh);
734 if (err) {
735 /* Don't brelse(bh) here; it's done in
736 * ext4_journal_forget() below */
737 unlock_buffer(bh);
738 goto failed;
739 }
740
741 memset(bh->b_data, 0, blocksize);
742 branch[n].p = (__le32 *) bh->b_data + offsets[n];
743 branch[n].key = cpu_to_le32(new_blocks[n]);
744 *branch[n].p = branch[n].key;
745 if (n == indirect_blks) {
746 current_block = new_blocks[n];
747 /*
748 * End of chain, update the last new metablock of
749 * the chain to point to the new allocated
750 * data blocks numbers
751 */
752 for (i = 1; i < num; i++)
753 *(branch[n].p + i) = cpu_to_le32(++current_block);
754 }
755 BUFFER_TRACE(bh, "marking uptodate");
756 set_buffer_uptodate(bh);
757 unlock_buffer(bh);
758
759 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
760 err = ext4_handle_dirty_metadata(handle, inode, bh);
761 if (err)
762 goto failed;
763 }
764 *blks = num;
765 return err;
766 failed:
767 /* Allocation failed, free what we already allocated */
768 ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0);
769 for (i = 1; i <= n ; i++) {
770 /*
771 * branch[i].bh is newly allocated, so there is no
772 * need to revoke the block, which is why we don't
773 * need to set EXT4_FREE_BLOCKS_METADATA.
774 */
775 ext4_free_blocks(handle, inode, 0, new_blocks[i], 1,
776 EXT4_FREE_BLOCKS_FORGET);
777 }
778 for (i = n+1; i < indirect_blks; i++)
779 ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
780
781 ext4_free_blocks(handle, inode, 0, new_blocks[i], num, 0);
782
783 return err;
784 }
785
786 /**
787 * ext4_splice_branch - splice the allocated branch onto inode.
788 * @inode: owner
789 * @block: (logical) number of block we are adding
790 * @chain: chain of indirect blocks (with a missing link - see
791 * ext4_alloc_branch)
792 * @where: location of missing link
793 * @num: number of indirect blocks we are adding
794 * @blks: number of direct blocks we are adding
795 *
796 * This function fills the missing link and does all housekeeping needed in
797 * inode (->i_blocks, etc.). In case of success we end up with the full
798 * chain to new block and return 0.
799 */
800 static int ext4_splice_branch(handle_t *handle, struct inode *inode,
801 ext4_lblk_t block, Indirect *where, int num,
802 int blks)
803 {
804 int i;
805 int err = 0;
806 ext4_fsblk_t current_block;
807
808 /*
809 * If we're splicing into a [td]indirect block (as opposed to the
810 * inode) then we need to get write access to the [td]indirect block
811 * before the splice.
812 */
813 if (where->bh) {
814 BUFFER_TRACE(where->bh, "get_write_access");
815 err = ext4_journal_get_write_access(handle, where->bh);
816 if (err)
817 goto err_out;
818 }
819 /* That's it */
820
821 *where->p = where->key;
822
823 /*
824 * Update the host buffer_head or inode to point to more just allocated
825 * direct blocks blocks
826 */
827 if (num == 0 && blks > 1) {
828 current_block = le32_to_cpu(where->key) + 1;
829 for (i = 1; i < blks; i++)
830 *(where->p + i) = cpu_to_le32(current_block++);
831 }
832
833 /* We are done with atomic stuff, now do the rest of housekeeping */
834 /* had we spliced it onto indirect block? */
835 if (where->bh) {
836 /*
837 * If we spliced it onto an indirect block, we haven't
838 * altered the inode. Note however that if it is being spliced
839 * onto an indirect block at the very end of the file (the
840 * file is growing) then we *will* alter the inode to reflect
841 * the new i_size. But that is not done here - it is done in
842 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
843 */
844 jbd_debug(5, "splicing indirect only\n");
845 BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
846 err = ext4_handle_dirty_metadata(handle, inode, where->bh);
847 if (err)
848 goto err_out;
849 } else {
850 /*
851 * OK, we spliced it into the inode itself on a direct block.
852 */
853 ext4_mark_inode_dirty(handle, inode);
854 jbd_debug(5, "splicing direct\n");
855 }
856 return err;
857
858 err_out:
859 for (i = 1; i <= num; i++) {
860 /*
861 * branch[i].bh is newly allocated, so there is no
862 * need to revoke the block, which is why we don't
863 * need to set EXT4_FREE_BLOCKS_METADATA.
864 */
865 ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
866 EXT4_FREE_BLOCKS_FORGET);
867 }
868 ext4_free_blocks(handle, inode, 0, le32_to_cpu(where[num].key),
869 blks, 0);
870
871 return err;
872 }
873
874 /*
875 * The ext4_ind_get_blocks() function handles non-extents inodes
876 * (i.e., using the traditional indirect/double-indirect i_blocks
877 * scheme) for ext4_get_blocks().
878 *
879 * Allocation strategy is simple: if we have to allocate something, we will
880 * have to go the whole way to leaf. So let's do it before attaching anything
881 * to tree, set linkage between the newborn blocks, write them if sync is
882 * required, recheck the path, free and repeat if check fails, otherwise
883 * set the last missing link (that will protect us from any truncate-generated
884 * removals - all blocks on the path are immune now) and possibly force the
885 * write on the parent block.
886 * That has a nice additional property: no special recovery from the failed
887 * allocations is needed - we simply release blocks and do not touch anything
888 * reachable from inode.
889 *
890 * `handle' can be NULL if create == 0.
891 *
892 * return > 0, # of blocks mapped or allocated.
893 * return = 0, if plain lookup failed.
894 * return < 0, error case.
895 *
896 * The ext4_ind_get_blocks() function should be called with
897 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
898 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
899 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
900 * blocks.
901 */
902 static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
903 ext4_lblk_t iblock, unsigned int maxblocks,
904 struct buffer_head *bh_result,
905 int flags)
906 {
907 int err = -EIO;
908 ext4_lblk_t offsets[4];
909 Indirect chain[4];
910 Indirect *partial;
911 ext4_fsblk_t goal;
912 int indirect_blks;
913 int blocks_to_boundary = 0;
914 int depth;
915 int count = 0;
916 ext4_fsblk_t first_block = 0;
917
918 J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
919 J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
920 depth = ext4_block_to_path(inode, iblock, offsets,
921 &blocks_to_boundary);
922
923 if (depth == 0)
924 goto out;
925
926 partial = ext4_get_branch(inode, depth, offsets, chain, &err);
927
928 /* Simplest case - block found, no allocation needed */
929 if (!partial) {
930 first_block = le32_to_cpu(chain[depth - 1].key);
931 clear_buffer_new(bh_result);
932 count++;
933 /*map more blocks*/
934 while (count < maxblocks && count <= blocks_to_boundary) {
935 ext4_fsblk_t blk;
936
937 blk = le32_to_cpu(*(chain[depth-1].p + count));
938
939 if (blk == first_block + count)
940 count++;
941 else
942 break;
943 }
944 goto got_it;
945 }
946
947 /* Next simple case - plain lookup or failed read of indirect block */
948 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
949 goto cleanup;
950
951 /*
952 * Okay, we need to do block allocation.
953 */
954 goal = ext4_find_goal(inode, iblock, partial);
955
956 /* the number of blocks need to allocate for [d,t]indirect blocks */
957 indirect_blks = (chain + depth) - partial - 1;
958
959 /*
960 * Next look up the indirect map to count the totoal number of
961 * direct blocks to allocate for this branch.
962 */
963 count = ext4_blks_to_allocate(partial, indirect_blks,
964 maxblocks, blocks_to_boundary);
965 /*
966 * Block out ext4_truncate while we alter the tree
967 */
968 err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
969 &count, goal,
970 offsets + (partial - chain), partial);
971
972 /*
973 * The ext4_splice_branch call will free and forget any buffers
974 * on the new chain if there is a failure, but that risks using
975 * up transaction credits, especially for bitmaps where the
976 * credits cannot be returned. Can we handle this somehow? We
977 * may need to return -EAGAIN upwards in the worst case. --sct
978 */
979 if (!err)
980 err = ext4_splice_branch(handle, inode, iblock,
981 partial, indirect_blks, count);
982 if (err)
983 goto cleanup;
984
985 set_buffer_new(bh_result);
986
987 ext4_update_inode_fsync_trans(handle, inode, 1);
988 got_it:
989 map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
990 if (count > blocks_to_boundary)
991 set_buffer_boundary(bh_result);
992 err = count;
993 /* Clean up and exit */
994 partial = chain + depth - 1; /* the whole chain */
995 cleanup:
996 while (partial > chain) {
997 BUFFER_TRACE(partial->bh, "call brelse");
998 brelse(partial->bh);
999 partial--;
1000 }
1001 BUFFER_TRACE(bh_result, "returned");
1002 out:
1003 return err;
1004 }
1005
1006 #ifdef CONFIG_QUOTA
1007 qsize_t *ext4_get_reserved_space(struct inode *inode)
1008 {
1009 return &EXT4_I(inode)->i_reserved_quota;
1010 }
1011 #endif
1012 /*
1013 * Calculate the number of metadata blocks need to reserve
1014 * to allocate @blocks for non extent file based file
1015 */
1016 static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
1017 {
1018 int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
1019 int ind_blks, dind_blks, tind_blks;
1020
1021 /* number of new indirect blocks needed */
1022 ind_blks = (blocks + icap - 1) / icap;
1023
1024 dind_blks = (ind_blks + icap - 1) / icap;
1025
1026 tind_blks = 1;
1027
1028 return ind_blks + dind_blks + tind_blks;
1029 }
1030
1031 /*
1032 * Calculate the number of metadata blocks need to reserve
1033 * to allocate given number of blocks
1034 */
1035 static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
1036 {
1037 if (!blocks)
1038 return 0;
1039
1040 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
1041 return ext4_ext_calc_metadata_amount(inode, blocks);
1042
1043 return ext4_indirect_calc_metadata_amount(inode, blocks);
1044 }
1045
1046 /*
1047 * Called with i_data_sem down, which is important since we can call
1048 * ext4_discard_preallocations() from here.
1049 */
1050 static void ext4_da_update_reserve_space(struct inode *inode, int used)
1051 {
1052 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1053 struct ext4_inode_info *ei = EXT4_I(inode);
1054 int mdb_free = 0;
1055
1056 spin_lock(&ei->i_block_reservation_lock);
1057 if (unlikely(used > ei->i_reserved_data_blocks)) {
1058 ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
1059 "with only %d reserved data blocks\n",
1060 __func__, inode->i_ino, used,
1061 ei->i_reserved_data_blocks);
1062 WARN_ON(1);
1063 used = ei->i_reserved_data_blocks;
1064 }
1065
1066 /* Update per-inode reservations */
1067 ei->i_reserved_data_blocks -= used;
1068 used += ei->i_allocated_meta_blocks;
1069 ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
1070 ei->i_allocated_meta_blocks = 0;
1071 percpu_counter_sub(&sbi->s_dirtyblocks_counter, used);
1072
1073 if (ei->i_reserved_data_blocks == 0) {
1074 /*
1075 * We can release all of the reserved metadata blocks
1076 * only when we have written all of the delayed
1077 * allocation blocks.
1078 */
1079 mdb_free = ei->i_allocated_meta_blocks;
1080 percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
1081 ei->i_allocated_meta_blocks = 0;
1082 }
1083 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1084
1085 /* Update quota subsystem */
1086 vfs_dq_claim_block(inode, used);
1087 if (mdb_free)
1088 vfs_dq_release_reservation_block(inode, mdb_free);
1089
1090 /*
1091 * If we have done all the pending block allocations and if
1092 * there aren't any writers on the inode, we can discard the
1093 * inode's preallocations.
1094 */
1095 if ((ei->i_reserved_data_blocks == 0) &&
1096 (atomic_read(&inode->i_writecount) == 0))
1097 ext4_discard_preallocations(inode);
1098 }
1099
1100 static int check_block_validity(struct inode *inode, const char *msg,
1101 sector_t logical, sector_t phys, int len)
1102 {
1103 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) {
1104 ext4_error(inode->i_sb, msg,
1105 "inode #%lu logical block %llu mapped to %llu "
1106 "(size %d)", inode->i_ino,
1107 (unsigned long long) logical,
1108 (unsigned long long) phys, len);
1109 return -EIO;
1110 }
1111 return 0;
1112 }
1113
1114 /*
1115 * Return the number of contiguous dirty pages in a given inode
1116 * starting at page frame idx.
1117 */
1118 static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
1119 unsigned int max_pages)
1120 {
1121 struct address_space *mapping = inode->i_mapping;
1122 pgoff_t index;
1123 struct pagevec pvec;
1124 pgoff_t num = 0;
1125 int i, nr_pages, done = 0;
1126
1127 if (max_pages == 0)
1128 return 0;
1129 pagevec_init(&pvec, 0);
1130 while (!done) {
1131 index = idx;
1132 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1133 PAGECACHE_TAG_DIRTY,
1134 (pgoff_t)PAGEVEC_SIZE);
1135 if (nr_pages == 0)
1136 break;
1137 for (i = 0; i < nr_pages; i++) {
1138 struct page *page = pvec.pages[i];
1139 struct buffer_head *bh, *head;
1140
1141 lock_page(page);
1142 if (unlikely(page->mapping != mapping) ||
1143 !PageDirty(page) ||
1144 PageWriteback(page) ||
1145 page->index != idx) {
1146 done = 1;
1147 unlock_page(page);
1148 break;
1149 }
1150 if (page_has_buffers(page)) {
1151 bh = head = page_buffers(page);
1152 do {
1153 if (!buffer_delay(bh) &&
1154 !buffer_unwritten(bh))
1155 done = 1;
1156 bh = bh->b_this_page;
1157 } while (!done && (bh != head));
1158 }
1159 unlock_page(page);
1160 if (done)
1161 break;
1162 idx++;
1163 num++;
1164 if (num >= max_pages)
1165 break;
1166 }
1167 pagevec_release(&pvec);
1168 }
1169 return num;
1170 }
1171
1172 /*
1173 * The ext4_get_blocks() function tries to look up the requested blocks,
1174 * and returns if the blocks are already mapped.
1175 *
1176 * Otherwise it takes the write lock of the i_data_sem and allocate blocks
1177 * and store the allocated blocks in the result buffer head and mark it
1178 * mapped.
1179 *
1180 * If file type is extents based, it will call ext4_ext_get_blocks(),
1181 * Otherwise, call with ext4_ind_get_blocks() to handle indirect mapping
1182 * based files
1183 *
1184 * On success, it returns the number of blocks being mapped or allocate.
1185 * if create==0 and the blocks are pre-allocated and uninitialized block,
1186 * the result buffer head is unmapped. If the create ==1, it will make sure
1187 * the buffer head is mapped.
1188 *
1189 * It returns 0 if plain look up failed (blocks have not been allocated), in
1190 * that casem, buffer head is unmapped
1191 *
1192 * It returns the error in case of allocation failure.
1193 */
1194 int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
1195 unsigned int max_blocks, struct buffer_head *bh,
1196 int flags)
1197 {
1198 int retval;
1199
1200 clear_buffer_mapped(bh);
1201 clear_buffer_unwritten(bh);
1202
1203 ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u,"
1204 "logical block %lu\n", inode->i_ino, flags, max_blocks,
1205 (unsigned long)block);
1206 /*
1207 * Try to see if we can get the block without requesting a new
1208 * file system block.
1209 */
1210 down_read((&EXT4_I(inode)->i_data_sem));
1211 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
1212 retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
1213 bh, 0);
1214 } else {
1215 retval = ext4_ind_get_blocks(handle, inode, block, max_blocks,
1216 bh, 0);
1217 }
1218 up_read((&EXT4_I(inode)->i_data_sem));
1219
1220 if (retval > 0 && buffer_mapped(bh)) {
1221 int ret = check_block_validity(inode, "file system corruption",
1222 block, bh->b_blocknr, retval);
1223 if (ret != 0)
1224 return ret;
1225 }
1226
1227 /* If it is only a block(s) look up */
1228 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
1229 return retval;
1230
1231 /*
1232 * Returns if the blocks have already allocated
1233 *
1234 * Note that if blocks have been preallocated
1235 * ext4_ext_get_block() returns th create = 0
1236 * with buffer head unmapped.
1237 */
1238 if (retval > 0 && buffer_mapped(bh))
1239 return retval;
1240
1241 /*
1242 * When we call get_blocks without the create flag, the
1243 * BH_Unwritten flag could have gotten set if the blocks
1244 * requested were part of a uninitialized extent. We need to
1245 * clear this flag now that we are committed to convert all or
1246 * part of the uninitialized extent to be an initialized
1247 * extent. This is because we need to avoid the combination
1248 * of BH_Unwritten and BH_Mapped flags being simultaneously
1249 * set on the buffer_head.
1250 */
1251 clear_buffer_unwritten(bh);
1252
1253 /*
1254 * New blocks allocate and/or writing to uninitialized extent
1255 * will possibly result in updating i_data, so we take
1256 * the write lock of i_data_sem, and call get_blocks()
1257 * with create == 1 flag.
1258 */
1259 down_write((&EXT4_I(inode)->i_data_sem));
1260
1261 /*
1262 * if the caller is from delayed allocation writeout path
1263 * we have already reserved fs blocks for allocation
1264 * let the underlying get_block() function know to
1265 * avoid double accounting
1266 */
1267 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1268 EXT4_I(inode)->i_delalloc_reserved_flag = 1;
1269 /*
1270 * We need to check for EXT4 here because migrate
1271 * could have changed the inode type in between
1272 */
1273 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
1274 retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
1275 bh, flags);
1276 } else {
1277 retval = ext4_ind_get_blocks(handle, inode, block,
1278 max_blocks, bh, flags);
1279
1280 if (retval > 0 && buffer_new(bh)) {
1281 /*
1282 * We allocated new blocks which will result in
1283 * i_data's format changing. Force the migrate
1284 * to fail by clearing migrate flags
1285 */
1286 EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
1287 }
1288 }
1289
1290 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1291 EXT4_I(inode)->i_delalloc_reserved_flag = 0;
1292
1293 /*
1294 * Update reserved blocks/metadata blocks after successful
1295 * block allocation which had been deferred till now.
1296 */
1297 if ((retval > 0) && (flags & EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE))
1298 ext4_da_update_reserve_space(inode, retval);
1299
1300 up_write((&EXT4_I(inode)->i_data_sem));
1301 if (retval > 0 && buffer_mapped(bh)) {
1302 int ret = check_block_validity(inode, "file system "
1303 "corruption after allocation",
1304 block, bh->b_blocknr, retval);
1305 if (ret != 0)
1306 return ret;
1307 }
1308 return retval;
1309 }
1310
1311 /* Maximum number of blocks we map for direct IO at once. */
1312 #define DIO_MAX_BLOCKS 4096
1313
1314 int ext4_get_block(struct inode *inode, sector_t iblock,
1315 struct buffer_head *bh_result, int create)
1316 {
1317 handle_t *handle = ext4_journal_current_handle();
1318 int ret = 0, started = 0;
1319 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
1320 int dio_credits;
1321
1322 if (create && !handle) {
1323 /* Direct IO write... */
1324 if (max_blocks > DIO_MAX_BLOCKS)
1325 max_blocks = DIO_MAX_BLOCKS;
1326 dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
1327 handle = ext4_journal_start(inode, dio_credits);
1328 if (IS_ERR(handle)) {
1329 ret = PTR_ERR(handle);
1330 goto out;
1331 }
1332 started = 1;
1333 }
1334
1335 ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
1336 create ? EXT4_GET_BLOCKS_CREATE : 0);
1337 if (ret > 0) {
1338 bh_result->b_size = (ret << inode->i_blkbits);
1339 ret = 0;
1340 }
1341 if (started)
1342 ext4_journal_stop(handle);
1343 out:
1344 return ret;
1345 }
1346
1347 /*
1348 * `handle' can be NULL if create is zero
1349 */
1350 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
1351 ext4_lblk_t block, int create, int *errp)
1352 {
1353 struct buffer_head dummy;
1354 int fatal = 0, err;
1355 int flags = 0;
1356
1357 J_ASSERT(handle != NULL || create == 0);
1358
1359 dummy.b_state = 0;
1360 dummy.b_blocknr = -1000;
1361 buffer_trace_init(&dummy.b_history);
1362 if (create)
1363 flags |= EXT4_GET_BLOCKS_CREATE;
1364 err = ext4_get_blocks(handle, inode, block, 1, &dummy, flags);
1365 /*
1366 * ext4_get_blocks() returns number of blocks mapped. 0 in
1367 * case of a HOLE.
1368 */
1369 if (err > 0) {
1370 if (err > 1)
1371 WARN_ON(1);
1372 err = 0;
1373 }
1374 *errp = err;
1375 if (!err && buffer_mapped(&dummy)) {
1376 struct buffer_head *bh;
1377 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1378 if (!bh) {
1379 *errp = -EIO;
1380 goto err;
1381 }
1382 if (buffer_new(&dummy)) {
1383 J_ASSERT(create != 0);
1384 J_ASSERT(handle != NULL);
1385
1386 /*
1387 * Now that we do not always journal data, we should
1388 * keep in mind whether this should always journal the
1389 * new buffer as metadata. For now, regular file
1390 * writes use ext4_get_block instead, so it's not a
1391 * problem.
1392 */
1393 lock_buffer(bh);
1394 BUFFER_TRACE(bh, "call get_create_access");
1395 fatal = ext4_journal_get_create_access(handle, bh);
1396 if (!fatal && !buffer_uptodate(bh)) {
1397 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1398 set_buffer_uptodate(bh);
1399 }
1400 unlock_buffer(bh);
1401 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1402 err = ext4_handle_dirty_metadata(handle, inode, bh);
1403 if (!fatal)
1404 fatal = err;
1405 } else {
1406 BUFFER_TRACE(bh, "not a new buffer");
1407 }
1408 if (fatal) {
1409 *errp = fatal;
1410 brelse(bh);
1411 bh = NULL;
1412 }
1413 return bh;
1414 }
1415 err:
1416 return NULL;
1417 }
1418
1419 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1420 ext4_lblk_t block, int create, int *err)
1421 {
1422 struct buffer_head *bh;
1423
1424 bh = ext4_getblk(handle, inode, block, create, err);
1425 if (!bh)
1426 return bh;
1427 if (buffer_uptodate(bh))
1428 return bh;
1429 ll_rw_block(READ_META, 1, &bh);
1430 wait_on_buffer(bh);
1431 if (buffer_uptodate(bh))
1432 return bh;
1433 put_bh(bh);
1434 *err = -EIO;
1435 return NULL;
1436 }
1437
1438 static int walk_page_buffers(handle_t *handle,
1439 struct buffer_head *head,
1440 unsigned from,
1441 unsigned to,
1442 int *partial,
1443 int (*fn)(handle_t *handle,
1444 struct buffer_head *bh))
1445 {
1446 struct buffer_head *bh;
1447 unsigned block_start, block_end;
1448 unsigned blocksize = head->b_size;
1449 int err, ret = 0;
1450 struct buffer_head *next;
1451
1452 for (bh = head, block_start = 0;
1453 ret == 0 && (bh != head || !block_start);
1454 block_start = block_end, bh = next) {
1455 next = bh->b_this_page;
1456 block_end = block_start + blocksize;
1457 if (block_end <= from || block_start >= to) {
1458 if (partial && !buffer_uptodate(bh))
1459 *partial = 1;
1460 continue;
1461 }
1462 err = (*fn)(handle, bh);
1463 if (!ret)
1464 ret = err;
1465 }
1466 return ret;
1467 }
1468
1469 /*
1470 * To preserve ordering, it is essential that the hole instantiation and
1471 * the data write be encapsulated in a single transaction. We cannot
1472 * close off a transaction and start a new one between the ext4_get_block()
1473 * and the commit_write(). So doing the jbd2_journal_start at the start of
1474 * prepare_write() is the right place.
1475 *
1476 * Also, this function can nest inside ext4_writepage() ->
1477 * block_write_full_page(). In that case, we *know* that ext4_writepage()
1478 * has generated enough buffer credits to do the whole page. So we won't
1479 * block on the journal in that case, which is good, because the caller may
1480 * be PF_MEMALLOC.
1481 *
1482 * By accident, ext4 can be reentered when a transaction is open via
1483 * quota file writes. If we were to commit the transaction while thus
1484 * reentered, there can be a deadlock - we would be holding a quota
1485 * lock, and the commit would never complete if another thread had a
1486 * transaction open and was blocking on the quota lock - a ranking
1487 * violation.
1488 *
1489 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1490 * will _not_ run commit under these circumstances because handle->h_ref
1491 * is elevated. We'll still have enough credits for the tiny quotafile
1492 * write.
1493 */
1494 static int do_journal_get_write_access(handle_t *handle,
1495 struct buffer_head *bh)
1496 {
1497 if (!buffer_mapped(bh) || buffer_freed(bh))
1498 return 0;
1499 return ext4_journal_get_write_access(handle, bh);
1500 }
1501
1502 /*
1503 * Truncate blocks that were not used by write. We have to truncate the
1504 * pagecache as well so that corresponding buffers get properly unmapped.
1505 */
1506 static void ext4_truncate_failed_write(struct inode *inode)
1507 {
1508 truncate_inode_pages(inode->i_mapping, inode->i_size);
1509 ext4_truncate(inode);
1510 }
1511
1512 static int ext4_write_begin(struct file *file, struct address_space *mapping,
1513 loff_t pos, unsigned len, unsigned flags,
1514 struct page **pagep, void **fsdata)
1515 {
1516 struct inode *inode = mapping->host;
1517 int ret, needed_blocks;
1518 handle_t *handle;
1519 int retries = 0;
1520 struct page *page;
1521 pgoff_t index;
1522 unsigned from, to;
1523
1524 trace_ext4_write_begin(inode, pos, len, flags);
1525 /*
1526 * Reserve one block more for addition to orphan list in case
1527 * we allocate blocks but write fails for some reason
1528 */
1529 needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
1530 index = pos >> PAGE_CACHE_SHIFT;
1531 from = pos & (PAGE_CACHE_SIZE - 1);
1532 to = from + len;
1533
1534 retry:
1535 handle = ext4_journal_start(inode, needed_blocks);
1536 if (IS_ERR(handle)) {
1537 ret = PTR_ERR(handle);
1538 goto out;
1539 }
1540
1541 /* We cannot recurse into the filesystem as the transaction is already
1542 * started */
1543 flags |= AOP_FLAG_NOFS;
1544
1545 page = grab_cache_page_write_begin(mapping, index, flags);
1546 if (!page) {
1547 ext4_journal_stop(handle);
1548 ret = -ENOMEM;
1549 goto out;
1550 }
1551 *pagep = page;
1552
1553 ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
1554 ext4_get_block);
1555
1556 if (!ret && ext4_should_journal_data(inode)) {
1557 ret = walk_page_buffers(handle, page_buffers(page),
1558 from, to, NULL, do_journal_get_write_access);
1559 }
1560
1561 if (ret) {
1562 unlock_page(page);
1563 page_cache_release(page);
1564 /*
1565 * block_write_begin may have instantiated a few blocks
1566 * outside i_size. Trim these off again. Don't need
1567 * i_size_read because we hold i_mutex.
1568 *
1569 * Add inode to orphan list in case we crash before
1570 * truncate finishes
1571 */
1572 if (pos + len > inode->i_size && ext4_can_truncate(inode))
1573 ext4_orphan_add(handle, inode);
1574
1575 ext4_journal_stop(handle);
1576 if (pos + len > inode->i_size) {
1577 ext4_truncate_failed_write(inode);
1578 /*
1579 * If truncate failed early the inode might
1580 * still be on the orphan list; we need to
1581 * make sure the inode is removed from the
1582 * orphan list in that case.
1583 */
1584 if (inode->i_nlink)
1585 ext4_orphan_del(NULL, inode);
1586 }
1587 }
1588
1589 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1590 goto retry;
1591 out:
1592 return ret;
1593 }
1594
1595 /* For write_end() in data=journal mode */
1596 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1597 {
1598 if (!buffer_mapped(bh) || buffer_freed(bh))
1599 return 0;
1600 set_buffer_uptodate(bh);
1601 return ext4_handle_dirty_metadata(handle, NULL, bh);
1602 }
1603
1604 static int ext4_generic_write_end(struct file *file,
1605 struct address_space *mapping,
1606 loff_t pos, unsigned len, unsigned copied,
1607 struct page *page, void *fsdata)
1608 {
1609 int i_size_changed = 0;
1610 struct inode *inode = mapping->host;
1611 handle_t *handle = ext4_journal_current_handle();
1612
1613 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1614
1615 /*
1616 * No need to use i_size_read() here, the i_size
1617 * cannot change under us because we hold i_mutex.
1618 *
1619 * But it's important to update i_size while still holding page lock:
1620 * page writeout could otherwise come in and zero beyond i_size.
1621 */
1622 if (pos + copied > inode->i_size) {
1623 i_size_write(inode, pos + copied);
1624 i_size_changed = 1;
1625 }
1626
1627 if (pos + copied > EXT4_I(inode)->i_disksize) {
1628 /* We need to mark inode dirty even if
1629 * new_i_size is less that inode->i_size
1630 * bu greater than i_disksize.(hint delalloc)
1631 */
1632 ext4_update_i_disksize(inode, (pos + copied));
1633 i_size_changed = 1;
1634 }
1635 unlock_page(page);
1636 page_cache_release(page);
1637
1638 /*
1639 * Don't mark the inode dirty under page lock. First, it unnecessarily
1640 * makes the holding time of page lock longer. Second, it forces lock
1641 * ordering of page lock and transaction start for journaling
1642 * filesystems.
1643 */
1644 if (i_size_changed)
1645 ext4_mark_inode_dirty(handle, inode);
1646
1647 return copied;
1648 }
1649
1650 /*
1651 * We need to pick up the new inode size which generic_commit_write gave us
1652 * `file' can be NULL - eg, when called from page_symlink().
1653 *
1654 * ext4 never places buffers on inode->i_mapping->private_list. metadata
1655 * buffers are managed internally.
1656 */
1657 static int ext4_ordered_write_end(struct file *file,
1658 struct address_space *mapping,
1659 loff_t pos, unsigned len, unsigned copied,
1660 struct page *page, void *fsdata)
1661 {
1662 handle_t *handle = ext4_journal_current_handle();
1663 struct inode *inode = mapping->host;
1664 int ret = 0, ret2;
1665
1666 trace_ext4_ordered_write_end(inode, pos, len, copied);
1667 ret = ext4_jbd2_file_inode(handle, inode);
1668
1669 if (ret == 0) {
1670 ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1671 page, fsdata);
1672 copied = ret2;
1673 if (pos + len > inode->i_size && ext4_can_truncate(inode))
1674 /* if we have allocated more blocks and copied
1675 * less. We will have blocks allocated outside
1676 * inode->i_size. So truncate them
1677 */
1678 ext4_orphan_add(handle, inode);
1679 if (ret2 < 0)
1680 ret = ret2;
1681 }
1682 ret2 = ext4_journal_stop(handle);
1683 if (!ret)
1684 ret = ret2;
1685
1686 if (pos + len > inode->i_size) {
1687 ext4_truncate_failed_write(inode);
1688 /*
1689 * If truncate failed early the inode might still be
1690 * on the orphan list; we need to make sure the inode
1691 * is removed from the orphan list in that case.
1692 */
1693 if (inode->i_nlink)
1694 ext4_orphan_del(NULL, inode);
1695 }
1696
1697
1698 return ret ? ret : copied;
1699 }
1700
1701 static int ext4_writeback_write_end(struct file *file,
1702 struct address_space *mapping,
1703 loff_t pos, unsigned len, unsigned copied,
1704 struct page *page, void *fsdata)
1705 {
1706 handle_t *handle = ext4_journal_current_handle();
1707 struct inode *inode = mapping->host;
1708 int ret = 0, ret2;
1709
1710 trace_ext4_writeback_write_end(inode, pos, len, copied);
1711 ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
1712 page, fsdata);
1713 copied = ret2;
1714 if (pos + len > inode->i_size && ext4_can_truncate(inode))
1715 /* if we have allocated more blocks and copied
1716 * less. We will have blocks allocated outside
1717 * inode->i_size. So truncate them
1718 */
1719 ext4_orphan_add(handle, inode);
1720
1721 if (ret2 < 0)
1722 ret = ret2;
1723
1724 ret2 = ext4_journal_stop(handle);
1725 if (!ret)
1726 ret = ret2;
1727
1728 if (pos + len > inode->i_size) {
1729 ext4_truncate_failed_write(inode);
1730 /*
1731 * If truncate failed early the inode might still be
1732 * on the orphan list; we need to make sure the inode
1733 * is removed from the orphan list in that case.
1734 */
1735 if (inode->i_nlink)
1736 ext4_orphan_del(NULL, inode);
1737 }
1738
1739 return ret ? ret : copied;
1740 }
1741
1742 static int ext4_journalled_write_end(struct file *file,
1743 struct address_space *mapping,
1744 loff_t pos, unsigned len, unsigned copied,
1745 struct page *page, void *fsdata)
1746 {
1747 handle_t *handle = ext4_journal_current_handle();
1748 struct inode *inode = mapping->host;
1749 int ret = 0, ret2;
1750 int partial = 0;
1751 unsigned from, to;
1752 loff_t new_i_size;
1753
1754 trace_ext4_journalled_write_end(inode, pos, len, copied);
1755 from = pos & (PAGE_CACHE_SIZE - 1);
1756 to = from + len;
1757
1758 if (copied < len) {
1759 if (!PageUptodate(page))
1760 copied = 0;
1761 page_zero_new_buffers(page, from+copied, to);
1762 }
1763
1764 ret = walk_page_buffers(handle, page_buffers(page), from,
1765 to, &partial, write_end_fn);
1766 if (!partial)
1767 SetPageUptodate(page);
1768 new_i_size = pos + copied;
1769 if (new_i_size > inode->i_size)
1770 i_size_write(inode, pos+copied);
1771 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1772 if (new_i_size > EXT4_I(inode)->i_disksize) {
1773 ext4_update_i_disksize(inode, new_i_size);
1774 ret2 = ext4_mark_inode_dirty(handle, inode);
1775 if (!ret)
1776 ret = ret2;
1777 }
1778
1779 unlock_page(page);
1780 page_cache_release(page);
1781 if (pos + len > inode->i_size && ext4_can_truncate(inode))
1782 /* if we have allocated more blocks and copied
1783 * less. We will have blocks allocated outside
1784 * inode->i_size. So truncate them
1785 */
1786 ext4_orphan_add(handle, inode);
1787
1788 ret2 = ext4_journal_stop(handle);
1789 if (!ret)
1790 ret = ret2;
1791 if (pos + len > inode->i_size) {
1792 ext4_truncate_failed_write(inode);
1793 /*
1794 * If truncate failed early the inode might still be
1795 * on the orphan list; we need to make sure the inode
1796 * is removed from the orphan list in that case.
1797 */
1798 if (inode->i_nlink)
1799 ext4_orphan_del(NULL, inode);
1800 }
1801
1802 return ret ? ret : copied;
1803 }
1804
1805 static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
1806 {
1807 int retries = 0;
1808 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1809 struct ext4_inode_info *ei = EXT4_I(inode);
1810 unsigned long md_needed, md_reserved, total = 0;
1811
1812 /*
1813 * recalculate the amount of metadata blocks to reserve
1814 * in order to allocate nrblocks
1815 * worse case is one extent per block
1816 */
1817 repeat:
1818 spin_lock(&ei->i_block_reservation_lock);
1819 md_reserved = ei->i_reserved_meta_blocks;
1820 md_needed = ext4_calc_metadata_amount(inode, nrblocks);
1821 total = md_needed + nrblocks;
1822 spin_unlock(&ei->i_block_reservation_lock);
1823
1824 /*
1825 * Make quota reservation here to prevent quota overflow
1826 * later. Real quota accounting is done at pages writeout
1827 * time.
1828 */
1829 if (vfs_dq_reserve_block(inode, total)) {
1830 /*
1831 * We tend to badly over-estimate the amount of
1832 * metadata blocks which are needed, so if we have
1833 * reserved any metadata blocks, try to force out the
1834 * inode and see if we have any better luck.
1835 */
1836 if (md_reserved && retries++ <= 3)
1837 goto retry;
1838 return -EDQUOT;
1839 }
1840
1841 if (ext4_claim_free_blocks(sbi, total)) {
1842 vfs_dq_release_reservation_block(inode, total);
1843 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1844 retry:
1845 if (md_reserved)
1846 write_inode_now(inode, (retries == 3));
1847 yield();
1848 goto repeat;
1849 }
1850 return -ENOSPC;
1851 }
1852 spin_lock(&ei->i_block_reservation_lock);
1853 ei->i_reserved_data_blocks += nrblocks;
1854 ei->i_reserved_meta_blocks += md_needed;
1855 spin_unlock(&ei->i_block_reservation_lock);
1856
1857 return 0; /* success */
1858 }
1859
1860 static void ext4_da_release_space(struct inode *inode, int to_free)
1861 {
1862 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1863 struct ext4_inode_info *ei = EXT4_I(inode);
1864
1865 if (!to_free)
1866 return; /* Nothing to release, exit */
1867
1868 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1869
1870 if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1871 /*
1872 * if there aren't enough reserved blocks, then the
1873 * counter is messed up somewhere. Since this
1874 * function is called from invalidate page, it's
1875 * harmless to return without any action.
1876 */
1877 ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
1878 "ino %lu, to_free %d with only %d reserved "
1879 "data blocks\n", inode->i_ino, to_free,
1880 ei->i_reserved_data_blocks);
1881 WARN_ON(1);
1882 to_free = ei->i_reserved_data_blocks;
1883 }
1884 ei->i_reserved_data_blocks -= to_free;
1885
1886 if (ei->i_reserved_data_blocks == 0) {
1887 /*
1888 * We can release all of the reserved metadata blocks
1889 * only when we have written all of the delayed
1890 * allocation blocks.
1891 */
1892 to_free += ei->i_allocated_meta_blocks;
1893 ei->i_allocated_meta_blocks = 0;
1894 }
1895
1896 /* update fs dirty blocks counter */
1897 percpu_counter_sub(&sbi->s_dirtyblocks_counter, to_free);
1898
1899 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1900
1901 vfs_dq_release_reservation_block(inode, to_free);
1902 }
1903
1904 static void ext4_da_page_release_reservation(struct page *page,
1905 unsigned long offset)
1906 {
1907 int to_release = 0;
1908 struct buffer_head *head, *bh;
1909 unsigned int curr_off = 0;
1910
1911 head = page_buffers(page);
1912 bh = head;
1913 do {
1914 unsigned int next_off = curr_off + bh->b_size;
1915
1916 if ((offset <= curr_off) && (buffer_delay(bh))) {
1917 to_release++;
1918 clear_buffer_delay(bh);
1919 }
1920 curr_off = next_off;
1921 } while ((bh = bh->b_this_page) != head);
1922 ext4_da_release_space(page->mapping->host, to_release);
1923 }
1924
1925 /*
1926 * Delayed allocation stuff
1927 */
1928
1929 /*
1930 * mpage_da_submit_io - walks through extent of pages and try to write
1931 * them with writepage() call back
1932 *
1933 * @mpd->inode: inode
1934 * @mpd->first_page: first page of the extent
1935 * @mpd->next_page: page after the last page of the extent
1936 *
1937 * By the time mpage_da_submit_io() is called we expect all blocks
1938 * to be allocated. this may be wrong if allocation failed.
1939 *
1940 * As pages are already locked by write_cache_pages(), we can't use it
1941 */
1942 static int mpage_da_submit_io(struct mpage_da_data *mpd)
1943 {
1944 long pages_skipped;
1945 struct pagevec pvec;
1946 unsigned long index, end;
1947 int ret = 0, err, nr_pages, i;
1948 struct inode *inode = mpd->inode;
1949 struct address_space *mapping = inode->i_mapping;
1950
1951 BUG_ON(mpd->next_page <= mpd->first_page);
1952 /*
1953 * We need to start from the first_page to the next_page - 1
1954 * to make sure we also write the mapped dirty buffer_heads.
1955 * If we look at mpd->b_blocknr we would only be looking
1956 * at the currently mapped buffer_heads.
1957 */
1958 index = mpd->first_page;
1959 end = mpd->next_page - 1;
1960
1961 pagevec_init(&pvec, 0);
1962 while (index <= end) {
1963 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1964 if (nr_pages == 0)
1965 break;
1966 for (i = 0; i < nr_pages; i++) {
1967 struct page *page = pvec.pages[i];
1968
1969 index = page->index;
1970 if (index > end)
1971 break;
1972 index++;
1973
1974 BUG_ON(!PageLocked(page));
1975 BUG_ON(PageWriteback(page));
1976
1977 pages_skipped = mpd->wbc->pages_skipped;
1978 err = mapping->a_ops->writepage(page, mpd->wbc);
1979 if (!err && (pages_skipped == mpd->wbc->pages_skipped))
1980 /*
1981 * have successfully written the page
1982 * without skipping the same
1983 */
1984 mpd->pages_written++;
1985 /*
1986 * In error case, we have to continue because
1987 * remaining pages are still locked
1988 * XXX: unlock and re-dirty them?
1989 */
1990 if (ret == 0)
1991 ret = err;
1992 }
1993 pagevec_release(&pvec);
1994 }
1995 return ret;
1996 }
1997
1998 /*
1999 * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers
2000 *
2001 * @mpd->inode - inode to walk through
2002 * @exbh->b_blocknr - first block on a disk
2003 * @exbh->b_size - amount of space in bytes
2004 * @logical - first logical block to start assignment with
2005 *
2006 * the function goes through all passed space and put actual disk
2007 * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten
2008 */
2009 static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
2010 struct buffer_head *exbh)
2011 {
2012 struct inode *inode = mpd->inode;
2013 struct address_space *mapping = inode->i_mapping;
2014 int blocks = exbh->b_size >> inode->i_blkbits;
2015 sector_t pblock = exbh->b_blocknr, cur_logical;
2016 struct buffer_head *head, *bh;
2017 pgoff_t index, end;
2018 struct pagevec pvec;
2019 int nr_pages, i;
2020
2021 index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
2022 end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
2023 cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2024
2025 pagevec_init(&pvec, 0);
2026
2027 while (index <= end) {
2028 /* XXX: optimize tail */
2029 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
2030 if (nr_pages == 0)
2031 break;
2032 for (i = 0; i < nr_pages; i++) {
2033 struct page *page = pvec.pages[i];
2034
2035 index = page->index;
2036 if (index > end)
2037 break;
2038 index++;
2039
2040 BUG_ON(!PageLocked(page));
2041 BUG_ON(PageWriteback(page));
2042 BUG_ON(!page_has_buffers(page));
2043
2044 bh = page_buffers(page);
2045 head = bh;
2046
2047 /* skip blocks out of the range */
2048 do {
2049 if (cur_logical >= logical)
2050 break;
2051 cur_logical++;
2052 } while ((bh = bh->b_this_page) != head);
2053
2054 do {
2055 if (cur_logical >= logical + blocks)
2056 break;
2057
2058 if (buffer_delay(bh) ||
2059 buffer_unwritten(bh)) {
2060
2061 BUG_ON(bh->b_bdev != inode->i_sb->s_bdev);
2062
2063 if (buffer_delay(bh)) {
2064 clear_buffer_delay(bh);
2065 bh->b_blocknr = pblock;
2066 } else {
2067 /*
2068 * unwritten already should have
2069 * blocknr assigned. Verify that
2070 */
2071 clear_buffer_unwritten(bh);
2072 BUG_ON(bh->b_blocknr != pblock);
2073 }
2074
2075 } else if (buffer_mapped(bh))
2076 BUG_ON(bh->b_blocknr != pblock);
2077
2078 cur_logical++;
2079 pblock++;
2080 } while ((bh = bh->b_this_page) != head);
2081 }
2082 pagevec_release(&pvec);
2083 }
2084 }
2085
2086
2087 /*
2088 * __unmap_underlying_blocks - just a helper function to unmap
2089 * set of blocks described by @bh
2090 */
2091 static inline void __unmap_underlying_blocks(struct inode *inode,
2092 struct buffer_head *bh)
2093 {
2094 struct block_device *bdev = inode->i_sb->s_bdev;
2095 int blocks, i;
2096
2097 blocks = bh->b_size >> inode->i_blkbits;
2098 for (i = 0; i < blocks; i++)
2099 unmap_underlying_metadata(bdev, bh->b_blocknr + i);
2100 }
2101
2102 static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
2103 sector_t logical, long blk_cnt)
2104 {
2105 int nr_pages, i;
2106 pgoff_t index, end;
2107 struct pagevec pvec;
2108 struct inode *inode = mpd->inode;
2109 struct address_space *mapping = inode->i_mapping;
2110
2111 index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
2112 end = (logical + blk_cnt - 1) >>
2113 (PAGE_CACHE_SHIFT - inode->i_blkbits);
2114 while (index <= end) {
2115 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
2116 if (nr_pages == 0)
2117 break;
2118 for (i = 0; i < nr_pages; i++) {
2119 struct page *page = pvec.pages[i];
2120 index = page->index;
2121 if (index > end)
2122 break;
2123 index++;
2124
2125 BUG_ON(!PageLocked(page));
2126 BUG_ON(PageWriteback(page));
2127 block_invalidatepage(page, 0);
2128 ClearPageUptodate(page);
2129 unlock_page(page);
2130 }
2131 }
2132 return;
2133 }
2134
2135 static void ext4_print_free_blocks(struct inode *inode)
2136 {
2137 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2138 printk(KERN_CRIT "Total free blocks count %lld\n",
2139 ext4_count_free_blocks(inode->i_sb));
2140 printk(KERN_CRIT "Free/Dirty block details\n");
2141 printk(KERN_CRIT "free_blocks=%lld\n",
2142 (long long) percpu_counter_sum(&sbi->s_freeblocks_counter));
2143 printk(KERN_CRIT "dirty_blocks=%lld\n",
2144 (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter));
2145 printk(KERN_CRIT "Block reservation details\n");
2146 printk(KERN_CRIT "i_reserved_data_blocks=%u\n",
2147 EXT4_I(inode)->i_reserved_data_blocks);
2148 printk(KERN_CRIT "i_reserved_meta_blocks=%u\n",
2149 EXT4_I(inode)->i_reserved_meta_blocks);
2150 return;
2151 }
2152
2153 /*
2154 * mpage_da_map_blocks - go through given space
2155 *
2156 * @mpd - bh describing space
2157 *
2158 * The function skips space we know is already mapped to disk blocks.
2159 *
2160 */
2161 static int mpage_da_map_blocks(struct mpage_da_data *mpd)
2162 {
2163 int err, blks, get_blocks_flags;
2164 struct buffer_head new;
2165 sector_t next = mpd->b_blocknr;
2166 unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
2167 loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
2168 handle_t *handle = NULL;
2169
2170 /*
2171 * We consider only non-mapped and non-allocated blocks
2172 */
2173 if ((mpd->b_state & (1 << BH_Mapped)) &&
2174 !(mpd->b_state & (1 << BH_Delay)) &&
2175 !(mpd->b_state & (1 << BH_Unwritten)))
2176 return 0;
2177
2178 /*
2179 * If we didn't accumulate anything to write simply return
2180 */
2181 if (!mpd->b_size)
2182 return 0;
2183
2184 handle = ext4_journal_current_handle();
2185 BUG_ON(!handle);
2186
2187 /*
2188 * Call ext4_get_blocks() to allocate any delayed allocation
2189 * blocks, or to convert an uninitialized extent to be
2190 * initialized (in the case where we have written into
2191 * one or more preallocated blocks).
2192 *
2193 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
2194 * indicate that we are on the delayed allocation path. This
2195 * affects functions in many different parts of the allocation
2196 * call path. This flag exists primarily because we don't
2197 * want to change *many* call functions, so ext4_get_blocks()
2198 * will set the magic i_delalloc_reserved_flag once the
2199 * inode's allocation semaphore is taken.
2200 *
2201 * If the blocks in questions were delalloc blocks, set
2202 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
2203 * variables are updated after the blocks have been allocated.
2204 */
2205 new.b_state = 0;
2206 get_blocks_flags = (EXT4_GET_BLOCKS_CREATE |
2207 EXT4_GET_BLOCKS_DELALLOC_RESERVE);
2208 if (mpd->b_state & (1 << BH_Delay))
2209 get_blocks_flags |= EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE;
2210 blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks,
2211 &new, get_blocks_flags);
2212 if (blks < 0) {
2213 err = blks;
2214 /*
2215 * If get block returns with error we simply
2216 * return. Later writepage will redirty the page and
2217 * writepages will find the dirty page again
2218 */
2219 if (err == -EAGAIN)
2220 return 0;
2221
2222 if (err == -ENOSPC &&
2223 ext4_count_free_blocks(mpd->inode->i_sb)) {
2224 mpd->retval = err;
2225 return 0;
2226 }
2227
2228 /*
2229 * get block failure will cause us to loop in
2230 * writepages, because a_ops->writepage won't be able
2231 * to make progress. The page will be redirtied by
2232 * writepage and writepages will again try to write
2233 * the same.
2234 */
2235 ext4_msg(mpd->inode->i_sb, KERN_CRIT,
2236 "delayed block allocation failed for inode %lu at "
2237 "logical offset %llu with max blocks %zd with "
2238 "error %d\n", mpd->inode->i_ino,
2239 (unsigned long long) next,
2240 mpd->b_size >> mpd->inode->i_blkbits, err);
2241 printk(KERN_CRIT "This should not happen!! "
2242 "Data will be lost\n");
2243 if (err == -ENOSPC) {
2244 ext4_print_free_blocks(mpd->inode);
2245 }
2246 /* invalidate all the pages */
2247 ext4_da_block_invalidatepages(mpd, next,
2248 mpd->b_size >> mpd->inode->i_blkbits);
2249 return err;
2250 }
2251 BUG_ON(blks == 0);
2252
2253 new.b_size = (blks << mpd->inode->i_blkbits);
2254
2255 if (buffer_new(&new))
2256 __unmap_underlying_blocks(mpd->inode, &new);
2257
2258 /*
2259 * If blocks are delayed marked, we need to
2260 * put actual blocknr and drop delayed bit
2261 */
2262 if ((mpd->b_state & (1 << BH_Delay)) ||
2263 (mpd->b_state & (1 << BH_Unwritten)))
2264 mpage_put_bnr_to_bhs(mpd, next, &new);
2265
2266 if (ext4_should_order_data(mpd->inode)) {
2267 err = ext4_jbd2_file_inode(handle, mpd->inode);
2268 if (err)
2269 return err;
2270 }
2271
2272 /*
2273 * Update on-disk size along with block allocation.
2274 */
2275 disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
2276 if (disksize > i_size_read(mpd->inode))
2277 disksize = i_size_read(mpd->inode);
2278 if (disksize > EXT4_I(mpd->inode)->i_disksize) {
2279 ext4_update_i_disksize(mpd->inode, disksize);
2280 return ext4_mark_inode_dirty(handle, mpd->inode);
2281 }
2282
2283 return 0;
2284 }
2285
2286 #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
2287 (1 << BH_Delay) | (1 << BH_Unwritten))
2288
2289 /*
2290 * mpage_add_bh_to_extent - try to add one more block to extent of blocks
2291 *
2292 * @mpd->lbh - extent of blocks
2293 * @logical - logical number of the block in the file
2294 * @bh - bh of the block (used to access block's state)
2295 *
2296 * the function is used to collect contig. blocks in same state
2297 */
2298 static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
2299 sector_t logical, size_t b_size,
2300 unsigned long b_state)
2301 {
2302 sector_t next;
2303 int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
2304
2305 /* check if thereserved journal credits might overflow */
2306 if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) {
2307 if (nrblocks >= EXT4_MAX_TRANS_DATA) {
2308 /*
2309 * With non-extent format we are limited by the journal
2310 * credit available. Total credit needed to insert
2311 * nrblocks contiguous blocks is dependent on the
2312 * nrblocks. So limit nrblocks.
2313 */
2314 goto flush_it;
2315 } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
2316 EXT4_MAX_TRANS_DATA) {
2317 /*
2318 * Adding the new buffer_head would make it cross the
2319 * allowed limit for which we have journal credit
2320 * reserved. So limit the new bh->b_size
2321 */
2322 b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
2323 mpd->inode->i_blkbits;
2324 /* we will do mpage_da_submit_io in the next loop */
2325 }
2326 }
2327 /*
2328 * First block in the extent
2329 */
2330 if (mpd->b_size == 0) {
2331 mpd->b_blocknr = logical;
2332 mpd->b_size = b_size;
2333 mpd->b_state = b_state & BH_FLAGS;
2334 return;
2335 }
2336
2337 next = mpd->b_blocknr + nrblocks;
2338 /*
2339 * Can we merge the block to our big extent?
2340 */
2341 if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
2342 mpd->b_size += b_size;
2343 return;
2344 }
2345
2346 flush_it:
2347 /*
2348 * We couldn't merge the block to our extent, so we
2349 * need to flush current extent and start new one
2350 */
2351 if (mpage_da_map_blocks(mpd) == 0)
2352 mpage_da_submit_io(mpd);
2353 mpd->io_done = 1;
2354 return;
2355 }
2356
2357 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
2358 {
2359 return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
2360 }
2361
2362 /*
2363 * __mpage_da_writepage - finds extent of pages and blocks
2364 *
2365 * @page: page to consider
2366 * @wbc: not used, we just follow rules
2367 * @data: context
2368 *
2369 * The function finds extents of pages and scan them for all blocks.
2370 */
2371 static int __mpage_da_writepage(struct page *page,
2372 struct writeback_control *wbc, void *data)
2373 {
2374 struct mpage_da_data *mpd = data;
2375 struct inode *inode = mpd->inode;
2376 struct buffer_head *bh, *head;
2377 sector_t logical;
2378
2379 if (mpd->io_done) {
2380 /*
2381 * Rest of the page in the page_vec
2382 * redirty then and skip then. We will
2383 * try to write them again after
2384 * starting a new transaction
2385 */
2386 redirty_page_for_writepage(wbc, page);
2387 unlock_page(page);
2388 return MPAGE_DA_EXTENT_TAIL;
2389 }
2390 /*
2391 * Can we merge this page to current extent?
2392 */
2393 if (mpd->next_page != page->index) {
2394 /*
2395 * Nope, we can't. So, we map non-allocated blocks
2396 * and start IO on them using writepage()
2397 */
2398 if (mpd->next_page != mpd->first_page) {
2399 if (mpage_da_map_blocks(mpd) == 0)
2400 mpage_da_submit_io(mpd);
2401 /*
2402 * skip rest of the page in the page_vec
2403 */
2404 mpd->io_done = 1;
2405 redirty_page_for_writepage(wbc, page);
2406 unlock_page(page);
2407 return MPAGE_DA_EXTENT_TAIL;
2408 }
2409
2410 /*
2411 * Start next extent of pages ...
2412 */
2413 mpd->first_page = page->index;
2414
2415 /*
2416 * ... and blocks
2417 */
2418 mpd->b_size = 0;
2419 mpd->b_state = 0;
2420 mpd->b_blocknr = 0;
2421 }
2422
2423 mpd->next_page = page->index + 1;
2424 logical = (sector_t) page->index <<
2425 (PAGE_CACHE_SHIFT - inode->i_blkbits);
2426
2427 if (!page_has_buffers(page)) {
2428 mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
2429 (1 << BH_Dirty) | (1 << BH_Uptodate));
2430 if (mpd->io_done)
2431 return MPAGE_DA_EXTENT_TAIL;
2432 } else {
2433 /*
2434 * Page with regular buffer heads, just add all dirty ones
2435 */
2436 head = page_buffers(page);
2437 bh = head;
2438 do {
2439 BUG_ON(buffer_locked(bh));
2440 /*
2441 * We need to try to allocate
2442 * unmapped blocks in the same page.
2443 * Otherwise we won't make progress
2444 * with the page in ext4_writepage
2445 */
2446 if (ext4_bh_delay_or_unwritten(NULL, bh)) {
2447 mpage_add_bh_to_extent(mpd, logical,
2448 bh->b_size,
2449 bh->b_state);
2450 if (mpd->io_done)
2451 return MPAGE_DA_EXTENT_TAIL;
2452 } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2453 /*
2454 * mapped dirty buffer. We need to update
2455 * the b_state because we look at
2456 * b_state in mpage_da_map_blocks. We don't
2457 * update b_size because if we find an
2458 * unmapped buffer_head later we need to
2459 * use the b_state flag of that buffer_head.
2460 */
2461 if (mpd->b_size == 0)
2462 mpd->b_state = bh->b_state & BH_FLAGS;
2463 }
2464 logical++;
2465 } while ((bh = bh->b_this_page) != head);
2466 }
2467
2468 return 0;
2469 }
2470
2471 /*
2472 * This is a special get_blocks_t callback which is used by
2473 * ext4_da_write_begin(). It will either return mapped block or
2474 * reserve space for a single block.
2475 *
2476 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
2477 * We also have b_blocknr = -1 and b_bdev initialized properly
2478 *
2479 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
2480 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
2481 * initialized properly.
2482 */
2483 static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
2484 struct buffer_head *bh_result, int create)
2485 {
2486 int ret = 0;
2487 sector_t invalid_block = ~((sector_t) 0xffff);
2488
2489 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
2490 invalid_block = ~0;
2491
2492 BUG_ON(create == 0);
2493 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
2494
2495 /*
2496 * first, we need to know whether the block is allocated already
2497 * preallocated blocks are unmapped but should treated
2498 * the same as allocated blocks.
2499 */
2500 ret = ext4_get_blocks(NULL, inode, iblock, 1, bh_result, 0);
2501 if ((ret == 0) && !buffer_delay(bh_result)) {
2502 /* the block isn't (pre)allocated yet, let's reserve space */
2503 /*
2504 * XXX: __block_prepare_write() unmaps passed block,
2505 * is it OK?
2506 */
2507 ret = ext4_da_reserve_space(inode, 1);
2508 if (ret)
2509 /* not enough space to reserve */
2510 return ret;
2511
2512 map_bh(bh_result, inode->i_sb, invalid_block);
2513 set_buffer_new(bh_result);
2514 set_buffer_delay(bh_result);
2515 } else if (ret > 0) {
2516 bh_result->b_size = (ret << inode->i_blkbits);
2517 if (buffer_unwritten(bh_result)) {
2518 /* A delayed write to unwritten bh should
2519 * be marked new and mapped. Mapped ensures
2520 * that we don't do get_block multiple times
2521 * when we write to the same offset and new
2522 * ensures that we do proper zero out for
2523 * partial write.
2524 */
2525 set_buffer_new(bh_result);
2526 set_buffer_mapped(bh_result);
2527 }
2528 ret = 0;
2529 }
2530
2531 return ret;
2532 }
2533
2534 /*
2535 * This function is used as a standard get_block_t calback function
2536 * when there is no desire to allocate any blocks. It is used as a
2537 * callback function for block_prepare_write(), nobh_writepage(), and
2538 * block_write_full_page(). These functions should only try to map a
2539 * single block at a time.
2540 *
2541 * Since this function doesn't do block allocations even if the caller
2542 * requests it by passing in create=1, it is critically important that
2543 * any caller checks to make sure that any buffer heads are returned
2544 * by this function are either all already mapped or marked for
2545 * delayed allocation before calling nobh_writepage() or
2546 * block_write_full_page(). Otherwise, b_blocknr could be left
2547 * unitialized, and the page write functions will be taken by
2548 * surprise.
2549 */
2550 static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
2551 struct buffer_head *bh_result, int create)
2552 {
2553 int ret = 0;
2554 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
2555
2556 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
2557
2558 /*
2559 * we don't want to do block allocation in writepage
2560 * so call get_block_wrap with create = 0
2561 */
2562 ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0);
2563 if (ret > 0) {
2564 bh_result->b_size = (ret << inode->i_blkbits);
2565 ret = 0;
2566 }
2567 return ret;
2568 }
2569
2570 static int bget_one(handle_t *handle, struct buffer_head *bh)
2571 {
2572 get_bh(bh);
2573 return 0;
2574 }
2575
2576 static int bput_one(handle_t *handle, struct buffer_head *bh)
2577 {
2578 put_bh(bh);
2579 return 0;
2580 }
2581
2582 static int __ext4_journalled_writepage(struct page *page,
2583 unsigned int len)
2584 {
2585 struct address_space *mapping = page->mapping;
2586 struct inode *inode = mapping->host;
2587 struct buffer_head *page_bufs;
2588 handle_t *handle = NULL;
2589 int ret = 0;
2590 int err;
2591
2592 page_bufs = page_buffers(page);
2593 BUG_ON(!page_bufs);
2594 walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
2595 /* As soon as we unlock the page, it can go away, but we have
2596 * references to buffers so we are safe */
2597 unlock_page(page);
2598
2599 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
2600 if (IS_ERR(handle)) {
2601 ret = PTR_ERR(handle);
2602 goto out;
2603 }
2604
2605 ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
2606 do_journal_get_write_access);
2607
2608 err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
2609 write_end_fn);
2610 if (ret == 0)
2611 ret = err;
2612 err = ext4_journal_stop(handle);
2613 if (!ret)
2614 ret = err;
2615
2616 walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
2617 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
2618 out:
2619 return ret;
2620 }
2621
2622 /*
2623 * Note that we don't need to start a transaction unless we're journaling data
2624 * because we should have holes filled from ext4_page_mkwrite(). We even don't
2625 * need to file the inode to the transaction's list in ordered mode because if
2626 * we are writing back data added by write(), the inode is already there and if
2627 * we are writing back data modified via mmap(), noone guarantees in which
2628 * transaction the data will hit the disk. In case we are journaling data, we
2629 * cannot start transaction directly because transaction start ranks above page
2630 * lock so we have to do some magic.
2631 *
2632 * This function can get called via...
2633 * - ext4_da_writepages after taking page lock (have journal handle)
2634 * - journal_submit_inode_data_buffers (no journal handle)
2635 * - shrink_page_list via pdflush (no journal handle)
2636 * - grab_page_cache when doing write_begin (have journal handle)
2637 *
2638 * We don't do any block allocation in this function. If we have page with
2639 * multiple blocks we need to write those buffer_heads that are mapped. This
2640 * is important for mmaped based write. So if we do with blocksize 1K
2641 * truncate(f, 1024);
2642 * a = mmap(f, 0, 4096);
2643 * a[0] = 'a';
2644 * truncate(f, 4096);
2645 * we have in the page first buffer_head mapped via page_mkwrite call back
2646 * but other bufer_heads would be unmapped but dirty(dirty done via the
2647 * do_wp_page). So writepage should write the first block. If we modify
2648 * the mmap area beyond 1024 we will again get a page_fault and the
2649 * page_mkwrite callback will do the block allocation and mark the
2650 * buffer_heads mapped.
2651 *
2652 * We redirty the page if we have any buffer_heads that is either delay or
2653 * unwritten in the page.
2654 *
2655 * We can get recursively called as show below.
2656 *
2657 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
2658 * ext4_writepage()
2659 *
2660 * But since we don't do any block allocation we should not deadlock.
2661 * Page also have the dirty flag cleared so we don't get recurive page_lock.
2662 */
2663 static int ext4_writepage(struct page *page,
2664 struct writeback_control *wbc)
2665 {
2666 int ret = 0;
2667 loff_t size;
2668 unsigned int len;
2669 struct buffer_head *page_bufs;
2670 struct inode *inode = page->mapping->host;
2671
2672 trace_ext4_writepage(inode, page);
2673 size = i_size_read(inode);
2674 if (page->index == size >> PAGE_CACHE_SHIFT)
2675 len = size & ~PAGE_CACHE_MASK;
2676 else
2677 len = PAGE_CACHE_SIZE;
2678
2679 if (page_has_buffers(page)) {
2680 page_bufs = page_buffers(page);
2681 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2682 ext4_bh_delay_or_unwritten)) {
2683 /*
2684 * We don't want to do block allocation
2685 * So redirty the page and return
2686 * We may reach here when we do a journal commit
2687 * via journal_submit_inode_data_buffers.
2688 * If we don't have mapping block we just ignore
2689 * them. We can also reach here via shrink_page_list
2690 */
2691 redirty_page_for_writepage(wbc, page);
2692 unlock_page(page);
2693 return 0;
2694 }
2695 } else {
2696 /*
2697 * The test for page_has_buffers() is subtle:
2698 * We know the page is dirty but it lost buffers. That means
2699 * that at some moment in time after write_begin()/write_end()
2700 * has been called all buffers have been clean and thus they
2701 * must have been written at least once. So they are all
2702 * mapped and we can happily proceed with mapping them
2703 * and writing the page.
2704 *
2705 * Try to initialize the buffer_heads and check whether
2706 * all are mapped and non delay. We don't want to
2707 * do block allocation here.
2708 */
2709 ret = block_prepare_write(page, 0, len,
2710 noalloc_get_block_write);
2711 if (!ret) {
2712 page_bufs = page_buffers(page);
2713 /* check whether all are mapped and non delay */
2714 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2715 ext4_bh_delay_or_unwritten)) {
2716 redirty_page_for_writepage(wbc, page);
2717 unlock_page(page);
2718 return 0;
2719 }
2720 } else {
2721 /*
2722 * We can't do block allocation here
2723 * so just redity the page and unlock
2724 * and return
2725 */
2726 redirty_page_for_writepage(wbc, page);
2727 unlock_page(page);
2728 return 0;
2729 }
2730 /* now mark the buffer_heads as dirty and uptodate */
2731 block_commit_write(page, 0, len);
2732 }
2733
2734 if (PageChecked(page) && ext4_should_journal_data(inode)) {
2735 /*
2736 * It's mmapped pagecache. Add buffers and journal it. There
2737 * doesn't seem much point in redirtying the page here.
2738 */
2739 ClearPageChecked(page);
2740 return __ext4_journalled_writepage(page, len);
2741 }
2742
2743 if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
2744 ret = nobh_writepage(page, noalloc_get_block_write, wbc);
2745 else
2746 ret = block_write_full_page(page, noalloc_get_block_write,
2747 wbc);
2748
2749 return ret;
2750 }
2751
2752 /*
2753 * This is called via ext4_da_writepages() to
2754 * calulate the total number of credits to reserve to fit
2755 * a single extent allocation into a single transaction,
2756 * ext4_da_writpeages() will loop calling this before
2757 * the block allocation.
2758 */
2759
2760 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2761 {
2762 int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2763
2764 /*
2765 * With non-extent format the journal credit needed to
2766 * insert nrblocks contiguous block is dependent on
2767 * number of contiguous block. So we will limit
2768 * number of contiguous block to a sane value
2769 */
2770 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) &&
2771 (max_blocks > EXT4_MAX_TRANS_DATA))
2772 max_blocks = EXT4_MAX_TRANS_DATA;
2773
2774 return ext4_chunk_trans_blocks(inode, max_blocks);
2775 }
2776
2777 static int ext4_da_writepages(struct address_space *mapping,
2778 struct writeback_control *wbc)
2779 {
2780 pgoff_t index;
2781 int range_whole = 0;
2782 handle_t *handle = NULL;
2783 struct mpage_da_data mpd;
2784 struct inode *inode = mapping->host;
2785 int no_nrwrite_index_update;
2786 int pages_written = 0;
2787 long pages_skipped;
2788 unsigned int max_pages;
2789 int range_cyclic, cycled = 1, io_done = 0;
2790 int needed_blocks, ret = 0;
2791 long desired_nr_to_write, nr_to_writebump = 0;
2792 loff_t range_start = wbc->range_start;
2793 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2794
2795 trace_ext4_da_writepages(inode, wbc);
2796
2797 /*
2798 * No pages to write? This is mainly a kludge to avoid starting
2799 * a transaction for special inodes like journal inode on last iput()
2800 * because that could violate lock ordering on umount
2801 */
2802 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2803 return 0;
2804
2805 /*
2806 * If the filesystem has aborted, it is read-only, so return
2807 * right away instead of dumping stack traces later on that
2808 * will obscure the real source of the problem. We test
2809 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2810 * the latter could be true if the filesystem is mounted
2811 * read-only, and in that case, ext4_da_writepages should
2812 * *never* be called, so if that ever happens, we would want
2813 * the stack trace.
2814 */
2815 if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
2816 return -EROFS;
2817
2818 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2819 range_whole = 1;
2820
2821 range_cyclic = wbc->range_cyclic;
2822 if (wbc->range_cyclic) {
2823 index = mapping->writeback_index;
2824 if (index)
2825 cycled = 0;
2826 wbc->range_start = index << PAGE_CACHE_SHIFT;
2827 wbc->range_end = LLONG_MAX;
2828 wbc->range_cyclic = 0;
2829 } else
2830 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2831
2832 /*
2833 * This works around two forms of stupidity. The first is in
2834 * the writeback code, which caps the maximum number of pages
2835 * written to be 1024 pages. This is wrong on multiple
2836 * levels; different architectues have a different page size,
2837 * which changes the maximum amount of data which gets
2838 * written. Secondly, 4 megabytes is way too small. XFS
2839 * forces this value to be 16 megabytes by multiplying
2840 * nr_to_write parameter by four, and then relies on its
2841 * allocator to allocate larger extents to make them
2842 * contiguous. Unfortunately this brings us to the second
2843 * stupidity, which is that ext4's mballoc code only allocates
2844 * at most 2048 blocks. So we force contiguous writes up to
2845 * the number of dirty blocks in the inode, or
2846 * sbi->max_writeback_mb_bump whichever is smaller.
2847 */
2848 max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
2849 if (!range_cyclic && range_whole)
2850 desired_nr_to_write = wbc->nr_to_write * 8;
2851 else
2852 desired_nr_to_write = ext4_num_dirty_pages(inode, index,
2853 max_pages);
2854 if (desired_nr_to_write > max_pages)
2855 desired_nr_to_write = max_pages;
2856
2857 if (wbc->nr_to_write < desired_nr_to_write) {
2858 nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
2859 wbc->nr_to_write = desired_nr_to_write;
2860 }
2861
2862 mpd.wbc = wbc;
2863 mpd.inode = mapping->host;
2864
2865 /*
2866 * we don't want write_cache_pages to update
2867 * nr_to_write and writeback_index
2868 */
2869 no_nrwrite_index_update = wbc->no_nrwrite_index_update;
2870 wbc->no_nrwrite_index_update = 1;
2871 pages_skipped = wbc->pages_skipped;
2872
2873 retry:
2874 while (!ret && wbc->nr_to_write > 0) {
2875
2876 /*
2877 * we insert one extent at a time. So we need
2878 * credit needed for single extent allocation.
2879 * journalled mode is currently not supported
2880 * by delalloc
2881 */
2882 BUG_ON(ext4_should_journal_data(inode));
2883 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2884
2885 /* start a new transaction*/
2886 handle = ext4_journal_start(inode, needed_blocks);
2887 if (IS_ERR(handle)) {
2888 ret = PTR_ERR(handle);
2889 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2890 "%ld pages, ino %lu; err %d\n", __func__,
2891 wbc->nr_to_write, inode->i_ino, ret);
2892 goto out_writepages;
2893 }
2894
2895 /*
2896 * Now call __mpage_da_writepage to find the next
2897 * contiguous region of logical blocks that need
2898 * blocks to be allocated by ext4. We don't actually
2899 * submit the blocks for I/O here, even though
2900 * write_cache_pages thinks it will, and will set the
2901 * pages as clean for write before calling
2902 * __mpage_da_writepage().
2903 */
2904 mpd.b_size = 0;
2905 mpd.b_state = 0;
2906 mpd.b_blocknr = 0;
2907 mpd.first_page = 0;
2908 mpd.next_page = 0;
2909 mpd.io_done = 0;
2910 mpd.pages_written = 0;
2911 mpd.retval = 0;
2912 ret = write_cache_pages(mapping, wbc, __mpage_da_writepage,
2913 &mpd);
2914 /*
2915 * If we have a contiguous extent of pages and we
2916 * haven't done the I/O yet, map the blocks and submit
2917 * them for I/O.
2918 */
2919 if (!mpd.io_done && mpd.next_page != mpd.first_page) {
2920 if (mpage_da_map_blocks(&mpd) == 0)
2921 mpage_da_submit_io(&mpd);
2922 mpd.io_done = 1;
2923 ret = MPAGE_DA_EXTENT_TAIL;
2924 }
2925 trace_ext4_da_write_pages(inode, &mpd);
2926 wbc->nr_to_write -= mpd.pages_written;
2927
2928 ext4_journal_stop(handle);
2929
2930 if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
2931 /* commit the transaction which would
2932 * free blocks released in the transaction
2933 * and try again
2934 */
2935 jbd2_journal_force_commit_nested(sbi->s_journal);
2936 wbc->pages_skipped = pages_skipped;
2937 ret = 0;
2938 } else if (ret == MPAGE_DA_EXTENT_TAIL) {
2939 /*
2940 * got one extent now try with
2941 * rest of the pages
2942 */
2943 pages_written += mpd.pages_written;
2944 wbc->pages_skipped = pages_skipped;
2945 ret = 0;
2946 io_done = 1;
2947 } else if (wbc->nr_to_write)
2948 /*
2949 * There is no more writeout needed
2950 * or we requested for a noblocking writeout
2951 * and we found the device congested
2952 */
2953 break;
2954 }
2955 if (!io_done && !cycled) {
2956 cycled = 1;
2957 index = 0;
2958 wbc->range_start = index << PAGE_CACHE_SHIFT;
2959 wbc->range_end = mapping->writeback_index - 1;
2960 goto retry;
2961 }
2962 if (pages_skipped != wbc->pages_skipped)
2963 ext4_msg(inode->i_sb, KERN_CRIT,
2964 "This should not happen leaving %s "
2965 "with nr_to_write = %ld ret = %d\n",
2966 __func__, wbc->nr_to_write, ret);
2967
2968 /* Update index */
2969 index += pages_written;
2970 wbc->range_cyclic = range_cyclic;
2971 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2972 /*
2973 * set the writeback_index so that range_cyclic
2974 * mode will write it back later
2975 */
2976 mapping->writeback_index = index;
2977
2978 out_writepages:
2979 if (!no_nrwrite_index_update)
2980 wbc->no_nrwrite_index_update = 0;
2981 wbc->nr_to_write -= nr_to_writebump;
2982 wbc->range_start = range_start;
2983 trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
2984 return ret;
2985 }
2986
2987 #define FALL_BACK_TO_NONDELALLOC 1
2988 static int ext4_nonda_switch(struct super_block *sb)
2989 {
2990 s64 free_blocks, dirty_blocks;
2991 struct ext4_sb_info *sbi = EXT4_SB(sb);
2992
2993 /*
2994 * switch to non delalloc mode if we are running low
2995 * on free block. The free block accounting via percpu
2996 * counters can get slightly wrong with percpu_counter_batch getting
2997 * accumulated on each CPU without updating global counters
2998 * Delalloc need an accurate free block accounting. So switch
2999 * to non delalloc when we are near to error range.
3000 */
3001 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
3002 dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter);
3003 if (2 * free_blocks < 3 * dirty_blocks ||
3004 free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
3005 /*
3006 * free block count is less than 150% of dirty blocks
3007 * or free blocks is less than watermark
3008 */
3009 return 1;
3010 }
3011 /*
3012 * Even if we don't switch but are nearing capacity,
3013 * start pushing delalloc when 1/2 of free blocks are dirty.
3014 */
3015 if (free_blocks < 2 * dirty_blocks)
3016 writeback_inodes_sb_if_idle(sb);
3017
3018 return 0;
3019 }
3020
3021 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
3022 loff_t pos, unsigned len, unsigned flags,
3023 struct page **pagep, void **fsdata)
3024 {
3025 int ret, retries = 0;
3026 struct page *page;
3027 pgoff_t index;
3028 unsigned from, to;
3029 struct inode *inode = mapping->host;
3030 handle_t *handle;
3031
3032 index = pos >> PAGE_CACHE_SHIFT;
3033 from = pos & (PAGE_CACHE_SIZE - 1);
3034 to = from + len;
3035
3036 if (ext4_nonda_switch(inode->i_sb)) {
3037 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
3038 return ext4_write_begin(file, mapping, pos,
3039 len, flags, pagep, fsdata);
3040 }
3041 *fsdata = (void *)0;
3042 trace_ext4_da_write_begin(inode, pos, len, flags);
3043 retry:
3044 /*
3045 * With delayed allocation, we don't log the i_disksize update
3046 * if there is delayed block allocation. But we still need
3047 * to journalling the i_disksize update if writes to the end
3048 * of file which has an already mapped buffer.
3049 */
3050 handle = ext4_journal_start(inode, 1);
3051 if (IS_ERR(handle)) {
3052 ret = PTR_ERR(handle);
3053 goto out;
3054 }
3055 /* We cannot recurse into the filesystem as the transaction is already
3056 * started */
3057 flags |= AOP_FLAG_NOFS;
3058
3059 page = grab_cache_page_write_begin(mapping, index, flags);
3060 if (!page) {
3061 ext4_journal_stop(handle);
3062 ret = -ENOMEM;
3063 goto out;
3064 }
3065 *pagep = page;
3066
3067 ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
3068 ext4_da_get_block_prep);
3069 if (ret < 0) {
3070 unlock_page(page);
3071 ext4_journal_stop(handle);
3072 page_cache_release(page);
3073 /*
3074 * block_write_begin may have instantiated a few blocks
3075 * outside i_size. Trim these off again. Don't need
3076 * i_size_read because we hold i_mutex.
3077 */
3078 if (pos + len > inode->i_size)
3079 ext4_truncate_failed_write(inode);
3080 }
3081
3082 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3083 goto retry;
3084 out:
3085 return ret;
3086 }
3087
3088 /*
3089 * Check if we should update i_disksize
3090 * when write to the end of file but not require block allocation
3091 */
3092 static int ext4_da_should_update_i_disksize(struct page *page,
3093 unsigned long offset)
3094 {
3095 struct buffer_head *bh;
3096 struct inode *inode = page->mapping->host;
3097 unsigned int idx;
3098 int i;
3099
3100 bh = page_buffers(page);
3101 idx = offset >> inode->i_blkbits;
3102
3103 for (i = 0; i < idx; i++)
3104 bh = bh->b_this_page;
3105
3106 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
3107 return 0;
3108 return 1;
3109 }
3110
3111 static int ext4_da_write_end(struct file *file,
3112 struct address_space *mapping,
3113 loff_t pos, unsigned len, unsigned copied,
3114 struct page *page, void *fsdata)
3115 {
3116 struct inode *inode = mapping->host;
3117 int ret = 0, ret2;
3118 handle_t *handle = ext4_journal_current_handle();
3119 loff_t new_i_size;
3120 unsigned long start, end;
3121 int write_mode = (int)(unsigned long)fsdata;
3122
3123 if (write_mode == FALL_BACK_TO_NONDELALLOC) {
3124 if (ext4_should_order_data(inode)) {
3125 return ext4_ordered_write_end(file, mapping, pos,
3126 len, copied, page, fsdata);
3127 } else if (ext4_should_writeback_data(inode)) {
3128 return ext4_writeback_write_end(file, mapping, pos,
3129 len, copied, page, fsdata);
3130 } else {
3131 BUG();
3132 }
3133 }
3134
3135 trace_ext4_da_write_end(inode, pos, len, copied);
3136 start = pos & (PAGE_CACHE_SIZE - 1);
3137 end = start + copied - 1;
3138
3139 /*
3140 * generic_write_end() will run mark_inode_dirty() if i_size
3141 * changes. So let's piggyback the i_disksize mark_inode_dirty
3142 * into that.
3143 */
3144
3145 new_i_size = pos + copied;
3146 if (new_i_size > EXT4_I(inode)->i_disksize) {
3147 if (ext4_da_should_update_i_disksize(page, end)) {
3148 down_write(&EXT4_I(inode)->i_data_sem);
3149 if (new_i_size > EXT4_I(inode)->i_disksize) {
3150 /*
3151 * Updating i_disksize when extending file
3152 * without needing block allocation
3153 */
3154 if (ext4_should_order_data(inode))
3155 ret = ext4_jbd2_file_inode(handle,
3156 inode);
3157
3158 EXT4_I(inode)->i_disksize = new_i_size;
3159 }
3160 up_write(&EXT4_I(inode)->i_data_sem);
3161 /* We need to mark inode dirty even if
3162 * new_i_size is less that inode->i_size
3163 * bu greater than i_disksize.(hint delalloc)
3164 */
3165 ext4_mark_inode_dirty(handle, inode);
3166 }
3167 }
3168 ret2 = generic_write_end(file, mapping, pos, len, copied,
3169 page, fsdata);
3170 copied = ret2;
3171 if (ret2 < 0)
3172 ret = ret2;
3173 ret2 = ext4_journal_stop(handle);
3174 if (!ret)
3175 ret = ret2;
3176
3177 return ret ? ret : copied;
3178 }
3179
3180 static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
3181 {
3182 /*
3183 * Drop reserved blocks
3184 */
3185 BUG_ON(!PageLocked(page));
3186 if (!page_has_buffers(page))
3187 goto out;
3188
3189 ext4_da_page_release_reservation(page, offset);
3190
3191 out:
3192 ext4_invalidatepage(page, offset);
3193
3194 return;
3195 }
3196
3197 /*
3198 * Force all delayed allocation blocks to be allocated for a given inode.
3199 */
3200 int ext4_alloc_da_blocks(struct inode *inode)
3201 {
3202 trace_ext4_alloc_da_blocks(inode);
3203
3204 if (!EXT4_I(inode)->i_reserved_data_blocks &&
3205 !EXT4_I(inode)->i_reserved_meta_blocks)
3206 return 0;
3207
3208 /*
3209 * We do something simple for now. The filemap_flush() will
3210 * also start triggering a write of the data blocks, which is
3211 * not strictly speaking necessary (and for users of
3212 * laptop_mode, not even desirable). However, to do otherwise
3213 * would require replicating code paths in:
3214 *
3215 * ext4_da_writepages() ->
3216 * write_cache_pages() ---> (via passed in callback function)
3217 * __mpage_da_writepage() -->
3218 * mpage_add_bh_to_extent()
3219 * mpage_da_map_blocks()
3220 *
3221 * The problem is that write_cache_pages(), located in
3222 * mm/page-writeback.c, marks pages clean in preparation for
3223 * doing I/O, which is not desirable if we're not planning on
3224 * doing I/O at all.
3225 *
3226 * We could call write_cache_pages(), and then redirty all of
3227 * the pages by calling redirty_page_for_writeback() but that
3228 * would be ugly in the extreme. So instead we would need to
3229 * replicate parts of the code in the above functions,
3230 * simplifying them becuase we wouldn't actually intend to
3231 * write out the pages, but rather only collect contiguous
3232 * logical block extents, call the multi-block allocator, and
3233 * then update the buffer heads with the block allocations.
3234 *
3235 * For now, though, we'll cheat by calling filemap_flush(),
3236 * which will map the blocks, and start the I/O, but not
3237 * actually wait for the I/O to complete.
3238 */
3239 return filemap_flush(inode->i_mapping);
3240 }
3241
3242 /*
3243 * bmap() is special. It gets used by applications such as lilo and by
3244 * the swapper to find the on-disk block of a specific piece of data.
3245 *
3246 * Naturally, this is dangerous if the block concerned is still in the
3247 * journal. If somebody makes a swapfile on an ext4 data-journaling
3248 * filesystem and enables swap, then they may get a nasty shock when the
3249 * data getting swapped to that swapfile suddenly gets overwritten by
3250 * the original zero's written out previously to the journal and
3251 * awaiting writeback in the kernel's buffer cache.
3252 *
3253 * So, if we see any bmap calls here on a modified, data-journaled file,
3254 * take extra steps to flush any blocks which might be in the cache.
3255 */
3256 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3257 {
3258 struct inode *inode = mapping->host;
3259 journal_t *journal;
3260 int err;
3261
3262 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
3263 test_opt(inode->i_sb, DELALLOC)) {
3264 /*
3265 * With delalloc we want to sync the file
3266 * so that we can make sure we allocate
3267 * blocks for file
3268 */
3269 filemap_write_and_wait(mapping);
3270 }
3271
3272 if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
3273 /*
3274 * This is a REALLY heavyweight approach, but the use of
3275 * bmap on dirty files is expected to be extremely rare:
3276 * only if we run lilo or swapon on a freshly made file
3277 * do we expect this to happen.
3278 *
3279 * (bmap requires CAP_SYS_RAWIO so this does not
3280 * represent an unprivileged user DOS attack --- we'd be
3281 * in trouble if mortal users could trigger this path at
3282 * will.)
3283 *
3284 * NB. EXT4_STATE_JDATA is not set on files other than
3285 * regular files. If somebody wants to bmap a directory
3286 * or symlink and gets confused because the buffer
3287 * hasn't yet been flushed to disk, they deserve
3288 * everything they get.
3289 */
3290
3291 EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
3292 journal = EXT4_JOURNAL(inode);
3293 jbd2_journal_lock_updates(journal);
3294 err = jbd2_journal_flush(journal);
3295 jbd2_journal_unlock_updates(journal);
3296
3297 if (err)
3298 return 0;
3299 }
3300
3301 return generic_block_bmap(mapping, block, ext4_get_block);
3302 }
3303
3304 static int ext4_readpage(struct file *file, struct page *page)
3305 {
3306 return mpage_readpage(page, ext4_get_block);
3307 }
3308
3309 static int
3310 ext4_readpages(struct file *file, struct address_space *mapping,
3311 struct list_head *pages, unsigned nr_pages)
3312 {
3313 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
3314 }
3315
3316 static void ext4_invalidatepage(struct page *page, unsigned long offset)
3317 {
3318 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3319
3320 /*
3321 * If it's a full truncate we just forget about the pending dirtying
3322 */
3323 if (offset == 0)
3324 ClearPageChecked(page);
3325
3326 if (journal)
3327 jbd2_journal_invalidatepage(journal, page, offset);
3328 else
3329 block_invalidatepage(page, offset);
3330 }
3331
3332 static int ext4_releasepage(struct page *page, gfp_t wait)
3333 {
3334 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3335
3336 WARN_ON(PageChecked(page));
3337 if (!page_has_buffers(page))
3338 return 0;
3339 if (journal)
3340 return jbd2_journal_try_to_free_buffers(journal, page, wait);
3341 else
3342 return try_to_free_buffers(page);
3343 }
3344
3345 /*
3346 * O_DIRECT for ext3 (or indirect map) based files
3347 *
3348 * If the O_DIRECT write will extend the file then add this inode to the
3349 * orphan list. So recovery will truncate it back to the original size
3350 * if the machine crashes during the write.
3351 *
3352 * If the O_DIRECT write is intantiating holes inside i_size and the machine
3353 * crashes then stale disk data _may_ be exposed inside the file. But current
3354 * VFS code falls back into buffered path in that case so we are safe.
3355 */
3356 static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
3357 const struct iovec *iov, loff_t offset,
3358 unsigned long nr_segs)
3359 {
3360 struct file *file = iocb->ki_filp;
3361 struct inode *inode = file->f_mapping->host;
3362 struct ext4_inode_info *ei = EXT4_I(inode);
3363 handle_t *handle;
3364 ssize_t ret;
3365 int orphan = 0;
3366 size_t count = iov_length(iov, nr_segs);
3367 int retries = 0;
3368
3369 if (rw == WRITE) {
3370 loff_t final_size = offset + count;
3371
3372 if (final_size > inode->i_size) {
3373 /* Credits for sb + inode write */
3374 handle = ext4_journal_start(inode, 2);
3375 if (IS_ERR(handle)) {
3376 ret = PTR_ERR(handle);
3377 goto out;
3378 }
3379 ret = ext4_orphan_add(handle, inode);
3380 if (ret) {
3381 ext4_journal_stop(handle);
3382 goto out;
3383 }
3384 orphan = 1;
3385 ei->i_disksize = inode->i_size;
3386 ext4_journal_stop(handle);
3387 }
3388 }
3389
3390 retry:
3391 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
3392 offset, nr_segs,
3393 ext4_get_block, NULL);
3394 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3395 goto retry;
3396
3397 if (orphan) {
3398 int err;
3399
3400 /* Credits for sb + inode write */
3401 handle = ext4_journal_start(inode, 2);
3402 if (IS_ERR(handle)) {
3403 /* This is really bad luck. We've written the data
3404 * but cannot extend i_size. Bail out and pretend
3405 * the write failed... */
3406 ret = PTR_ERR(handle);
3407 goto out;
3408 }
3409 if (inode->i_nlink)
3410 ext4_orphan_del(handle, inode);
3411 if (ret > 0) {
3412 loff_t end = offset + ret;
3413 if (end > inode->i_size) {
3414 ei->i_disksize = end;
3415 i_size_write(inode, end);
3416 /*
3417 * We're going to return a positive `ret'
3418 * here due to non-zero-length I/O, so there's
3419 * no way of reporting error returns from
3420 * ext4_mark_inode_dirty() to userspace. So
3421 * ignore it.
3422 */
3423 ext4_mark_inode_dirty(handle, inode);
3424 }
3425 }
3426 err = ext4_journal_stop(handle);
3427 if (ret == 0)
3428 ret = err;
3429 }
3430 out:
3431 return ret;
3432 }
3433
3434 static int ext4_get_block_dio_write(struct inode *inode, sector_t iblock,
3435 struct buffer_head *bh_result, int create)
3436 {
3437 handle_t *handle = NULL;
3438 int ret = 0;
3439 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
3440 int dio_credits;
3441
3442 ext4_debug("ext4_get_block_dio_write: inode %lu, create flag %d\n",
3443 inode->i_ino, create);
3444 /*
3445 * DIO VFS code passes create = 0 flag for write to
3446 * the middle of file. It does this to avoid block
3447 * allocation for holes, to prevent expose stale data
3448 * out when there is parallel buffered read (which does
3449 * not hold the i_mutex lock) while direct IO write has
3450 * not completed. DIO request on holes finally falls back
3451 * to buffered IO for this reason.
3452 *
3453 * For ext4 extent based file, since we support fallocate,
3454 * new allocated extent as uninitialized, for holes, we
3455 * could fallocate blocks for holes, thus parallel
3456 * buffered IO read will zero out the page when read on
3457 * a hole while parallel DIO write to the hole has not completed.
3458 *
3459 * when we come here, we know it's a direct IO write to
3460 * to the middle of file (<i_size)
3461 * so it's safe to override the create flag from VFS.
3462 */
3463 create = EXT4_GET_BLOCKS_DIO_CREATE_EXT;
3464
3465 if (max_blocks > DIO_MAX_BLOCKS)
3466 max_blocks = DIO_MAX_BLOCKS;
3467 dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
3468 handle = ext4_journal_start(inode, dio_credits);
3469 if (IS_ERR(handle)) {
3470 ret = PTR_ERR(handle);
3471 goto out;
3472 }
3473 ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
3474 create);
3475 if (ret > 0) {
3476 bh_result->b_size = (ret << inode->i_blkbits);
3477 ret = 0;
3478 }
3479 ext4_journal_stop(handle);
3480 out:
3481 return ret;
3482 }
3483
3484 static void ext4_free_io_end(ext4_io_end_t *io)
3485 {
3486 BUG_ON(!io);
3487 iput(io->inode);
3488 kfree(io);
3489 }
3490 static void dump_aio_dio_list(struct inode * inode)
3491 {
3492 #ifdef EXT4_DEBUG
3493 struct list_head *cur, *before, *after;
3494 ext4_io_end_t *io, *io0, *io1;
3495
3496 if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)){
3497 ext4_debug("inode %lu aio dio list is empty\n", inode->i_ino);
3498 return;
3499 }
3500
3501 ext4_debug("Dump inode %lu aio_dio_completed_IO list \n", inode->i_ino);
3502 list_for_each_entry(io, &EXT4_I(inode)->i_aio_dio_complete_list, list){
3503 cur = &io->list;
3504 before = cur->prev;
3505 io0 = container_of(before, ext4_io_end_t, list);
3506 after = cur->next;
3507 io1 = container_of(after, ext4_io_end_t, list);
3508
3509 ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
3510 io, inode->i_ino, io0, io1);
3511 }
3512 #endif
3513 }
3514
3515 /*
3516 * check a range of space and convert unwritten extents to written.
3517 */
3518 static int ext4_end_aio_dio_nolock(ext4_io_end_t *io)
3519 {
3520 struct inode *inode = io->inode;
3521 loff_t offset = io->offset;
3522 size_t size = io->size;
3523 int ret = 0;
3524
3525 ext4_debug("end_aio_dio_onlock: io 0x%p from inode %lu,list->next 0x%p,"
3526 "list->prev 0x%p\n",
3527 io, inode->i_ino, io->list.next, io->list.prev);
3528
3529 if (list_empty(&io->list))
3530 return ret;
3531
3532 if (io->flag != DIO_AIO_UNWRITTEN)
3533 return ret;
3534
3535 if (offset + size <= i_size_read(inode))
3536 ret = ext4_convert_unwritten_extents(inode, offset, size);
3537
3538 if (ret < 0) {
3539 printk(KERN_EMERG "%s: failed to convert unwritten"
3540 "extents to written extents, error is %d"
3541 " io is still on inode %lu aio dio list\n",
3542 __func__, ret, inode->i_ino);
3543 return ret;
3544 }
3545
3546 /* clear the DIO AIO unwritten flag */
3547 io->flag = 0;
3548 return ret;
3549 }
3550 /*
3551 * work on completed aio dio IO, to convert unwritten extents to extents
3552 */
3553 static void ext4_end_aio_dio_work(struct work_struct *work)
3554 {
3555 ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
3556 struct inode *inode = io->inode;
3557 int ret = 0;
3558
3559 mutex_lock(&inode->i_mutex);
3560 ret = ext4_end_aio_dio_nolock(io);
3561 if (ret >= 0) {
3562 if (!list_empty(&io->list))
3563 list_del_init(&io->list);
3564 ext4_free_io_end(io);
3565 }
3566 mutex_unlock(&inode->i_mutex);
3567 }
3568 /*
3569 * This function is called from ext4_sync_file().
3570 *
3571 * When AIO DIO IO is completed, the work to convert unwritten
3572 * extents to written is queued on workqueue but may not get immediately
3573 * scheduled. When fsync is called, we need to ensure the
3574 * conversion is complete before fsync returns.
3575 * The inode keeps track of a list of completed AIO from DIO path
3576 * that might needs to do the conversion. This function walks through
3577 * the list and convert the related unwritten extents to written.
3578 */
3579 int flush_aio_dio_completed_IO(struct inode *inode)
3580 {
3581 ext4_io_end_t *io;
3582 int ret = 0;
3583 int ret2 = 0;
3584
3585 if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list))
3586 return ret;
3587
3588 dump_aio_dio_list(inode);
3589 while (!list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)){
3590 io = list_entry(EXT4_I(inode)->i_aio_dio_complete_list.next,
3591 ext4_io_end_t, list);
3592 /*
3593 * Calling ext4_end_aio_dio_nolock() to convert completed
3594 * IO to written.
3595 *
3596 * When ext4_sync_file() is called, run_queue() may already
3597 * about to flush the work corresponding to this io structure.
3598 * It will be upset if it founds the io structure related
3599 * to the work-to-be schedule is freed.
3600 *
3601 * Thus we need to keep the io structure still valid here after
3602 * convertion finished. The io structure has a flag to
3603 * avoid double converting from both fsync and background work
3604 * queue work.
3605 */
3606 ret = ext4_end_aio_dio_nolock(io);
3607 if (ret < 0)
3608 ret2 = ret;
3609 else
3610 list_del_init(&io->list);
3611 }
3612 return (ret2 < 0) ? ret2 : 0;
3613 }
3614
3615 static ext4_io_end_t *ext4_init_io_end (struct inode *inode)
3616 {
3617 ext4_io_end_t *io = NULL;
3618
3619 io = kmalloc(sizeof(*io), GFP_NOFS);
3620
3621 if (io) {
3622 igrab(inode);
3623 io->inode = inode;
3624 io->flag = 0;
3625 io->offset = 0;
3626 io->size = 0;
3627 io->error = 0;
3628 INIT_WORK(&io->work, ext4_end_aio_dio_work);
3629 INIT_LIST_HEAD(&io->list);
3630 }
3631
3632 return io;
3633 }
3634
3635 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3636 ssize_t size, void *private)
3637 {
3638 ext4_io_end_t *io_end = iocb->private;
3639 struct workqueue_struct *wq;
3640
3641 /* if not async direct IO or dio with 0 bytes write, just return */
3642 if (!io_end || !size)
3643 return;
3644
3645 ext_debug("ext4_end_io_dio(): io_end 0x%p"
3646 "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
3647 iocb->private, io_end->inode->i_ino, iocb, offset,
3648 size);
3649
3650 /* if not aio dio with unwritten extents, just free io and return */
3651 if (io_end->flag != DIO_AIO_UNWRITTEN){
3652 ext4_free_io_end(io_end);
3653 iocb->private = NULL;
3654 return;
3655 }
3656
3657 io_end->offset = offset;
3658 io_end->size = size;
3659 wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
3660
3661 /* queue the work to convert unwritten extents to written */
3662 queue_work(wq, &io_end->work);
3663
3664 /* Add the io_end to per-inode completed aio dio list*/
3665 list_add_tail(&io_end->list,
3666 &EXT4_I(io_end->inode)->i_aio_dio_complete_list);
3667 iocb->private = NULL;
3668 }
3669 /*
3670 * For ext4 extent files, ext4 will do direct-io write to holes,
3671 * preallocated extents, and those write extend the file, no need to
3672 * fall back to buffered IO.
3673 *
3674 * For holes, we fallocate those blocks, mark them as unintialized
3675 * If those blocks were preallocated, we mark sure they are splited, but
3676 * still keep the range to write as unintialized.
3677 *
3678 * The unwrritten extents will be converted to written when DIO is completed.
3679 * For async direct IO, since the IO may still pending when return, we
3680 * set up an end_io call back function, which will do the convertion
3681 * when async direct IO completed.
3682 *
3683 * If the O_DIRECT write will extend the file then add this inode to the
3684 * orphan list. So recovery will truncate it back to the original size
3685 * if the machine crashes during the write.
3686 *
3687 */
3688 static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3689 const struct iovec *iov, loff_t offset,
3690 unsigned long nr_segs)
3691 {
3692 struct file *file = iocb->ki_filp;
3693 struct inode *inode = file->f_mapping->host;
3694 ssize_t ret;
3695 size_t count = iov_length(iov, nr_segs);
3696
3697 loff_t final_size = offset + count;
3698 if (rw == WRITE && final_size <= inode->i_size) {
3699 /*
3700 * We could direct write to holes and fallocate.
3701 *
3702 * Allocated blocks to fill the hole are marked as uninitialized
3703 * to prevent paralel buffered read to expose the stale data
3704 * before DIO complete the data IO.
3705 *
3706 * As to previously fallocated extents, ext4 get_block
3707 * will just simply mark the buffer mapped but still
3708 * keep the extents uninitialized.
3709 *
3710 * for non AIO case, we will convert those unwritten extents
3711 * to written after return back from blockdev_direct_IO.
3712 *
3713 * for async DIO, the conversion needs to be defered when
3714 * the IO is completed. The ext4 end_io callback function
3715 * will be called to take care of the conversion work.
3716 * Here for async case, we allocate an io_end structure to
3717 * hook to the iocb.
3718 */
3719 iocb->private = NULL;
3720 EXT4_I(inode)->cur_aio_dio = NULL;
3721 if (!is_sync_kiocb(iocb)) {
3722 iocb->private = ext4_init_io_end(inode);
3723 if (!iocb->private)
3724 return -ENOMEM;
3725 /*
3726 * we save the io structure for current async
3727 * direct IO, so that later ext4_get_blocks()
3728 * could flag the io structure whether there
3729 * is a unwritten extents needs to be converted
3730 * when IO is completed.
3731 */
3732 EXT4_I(inode)->cur_aio_dio = iocb->private;
3733 }
3734
3735 ret = blockdev_direct_IO(rw, iocb, inode,
3736 inode->i_sb->s_bdev, iov,
3737 offset, nr_segs,
3738 ext4_get_block_dio_write,
3739 ext4_end_io_dio);
3740 if (iocb->private)
3741 EXT4_I(inode)->cur_aio_dio = NULL;
3742 /*
3743 * The io_end structure takes a reference to the inode,
3744 * that structure needs to be destroyed and the
3745 * reference to the inode need to be dropped, when IO is
3746 * complete, even with 0 byte write, or failed.
3747 *
3748 * In the successful AIO DIO case, the io_end structure will be
3749 * desctroyed and the reference to the inode will be dropped
3750 * after the end_io call back function is called.
3751 *
3752 * In the case there is 0 byte write, or error case, since
3753 * VFS direct IO won't invoke the end_io call back function,
3754 * we need to free the end_io structure here.
3755 */
3756 if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
3757 ext4_free_io_end(iocb->private);
3758 iocb->private = NULL;
3759 } else if (ret > 0 && (EXT4_I(inode)->i_state &
3760 EXT4_STATE_DIO_UNWRITTEN)) {
3761 int err;
3762 /*
3763 * for non AIO case, since the IO is already
3764 * completed, we could do the convertion right here
3765 */
3766 err = ext4_convert_unwritten_extents(inode,
3767 offset, ret);
3768 if (err < 0)
3769 ret = err;
3770 EXT4_I(inode)->i_state &= ~EXT4_STATE_DIO_UNWRITTEN;
3771 }
3772 return ret;
3773 }
3774
3775 /* for write the the end of file case, we fall back to old way */
3776 return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3777 }
3778
3779 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3780 const struct iovec *iov, loff_t offset,
3781 unsigned long nr_segs)
3782 {
3783 struct file *file = iocb->ki_filp;
3784 struct inode *inode = file->f_mapping->host;
3785
3786 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
3787 return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
3788
3789 return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3790 }
3791
3792 /*
3793 * Pages can be marked dirty completely asynchronously from ext4's journalling
3794 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
3795 * much here because ->set_page_dirty is called under VFS locks. The page is
3796 * not necessarily locked.
3797 *
3798 * We cannot just dirty the page and leave attached buffers clean, because the
3799 * buffers' dirty state is "definitive". We cannot just set the buffers dirty
3800 * or jbddirty because all the journalling code will explode.
3801 *
3802 * So what we do is to mark the page "pending dirty" and next time writepage
3803 * is called, propagate that into the buffers appropriately.
3804 */
3805 static int ext4_journalled_set_page_dirty(struct page *page)
3806 {
3807 SetPageChecked(page);
3808 return __set_page_dirty_nobuffers(page);
3809 }
3810
3811 static const struct address_space_operations ext4_ordered_aops = {
3812 .readpage = ext4_readpage,
3813 .readpages = ext4_readpages,
3814 .writepage = ext4_writepage,
3815 .sync_page = block_sync_page,
3816 .write_begin = ext4_write_begin,
3817 .write_end = ext4_ordered_write_end,
3818 .bmap = ext4_bmap,
3819 .invalidatepage = ext4_invalidatepage,
3820 .releasepage = ext4_releasepage,
3821 .direct_IO = ext4_direct_IO,
3822 .migratepage = buffer_migrate_page,
3823 .is_partially_uptodate = block_is_partially_uptodate,
3824 .error_remove_page = generic_error_remove_page,
3825 };
3826
3827 static const struct address_space_operations ext4_writeback_aops = {
3828 .readpage = ext4_readpage,
3829 .readpages = ext4_readpages,
3830 .writepage = ext4_writepage,
3831 .sync_page = block_sync_page,
3832 .write_begin = ext4_write_begin,
3833 .write_end = ext4_writeback_write_end,
3834 .bmap = ext4_bmap,
3835 .invalidatepage = ext4_invalidatepage,
3836 .releasepage = ext4_releasepage,
3837 .direct_IO = ext4_direct_IO,
3838 .migratepage = buffer_migrate_page,
3839 .is_partially_uptodate = block_is_partially_uptodate,
3840 .error_remove_page = generic_error_remove_page,
3841 };
3842
3843 static const struct address_space_operations ext4_journalled_aops = {
3844 .readpage = ext4_readpage,
3845 .readpages = ext4_readpages,
3846 .writepage = ext4_writepage,
3847 .sync_page = block_sync_page,
3848 .write_begin = ext4_write_begin,
3849 .write_end = ext4_journalled_write_end,
3850 .set_page_dirty = ext4_journalled_set_page_dirty,
3851 .bmap = ext4_bmap,
3852 .invalidatepage = ext4_invalidatepage,
3853 .releasepage = ext4_releasepage,
3854 .is_partially_uptodate = block_is_partially_uptodate,
3855 .error_remove_page = generic_error_remove_page,
3856 };
3857
3858 static const struct address_space_operations ext4_da_aops = {
3859 .readpage = ext4_readpage,
3860 .readpages = ext4_readpages,
3861 .writepage = ext4_writepage,
3862 .writepages = ext4_da_writepages,
3863 .sync_page = block_sync_page,
3864 .write_begin = ext4_da_write_begin,
3865 .write_end = ext4_da_write_end,
3866 .bmap = ext4_bmap,
3867 .invalidatepage = ext4_da_invalidatepage,
3868 .releasepage = ext4_releasepage,
3869 .direct_IO = ext4_direct_IO,
3870 .migratepage = buffer_migrate_page,
3871 .is_partially_uptodate = block_is_partially_uptodate,
3872 .error_remove_page = generic_error_remove_page,
3873 };
3874
3875 void ext4_set_aops(struct inode *inode)
3876 {
3877 if (ext4_should_order_data(inode) &&
3878 test_opt(inode->i_sb, DELALLOC))
3879 inode->i_mapping->a_ops = &ext4_da_aops;
3880 else if (ext4_should_order_data(inode))
3881 inode->i_mapping->a_ops = &ext4_ordered_aops;
3882 else if (ext4_should_writeback_data(inode) &&
3883 test_opt(inode->i_sb, DELALLOC))
3884 inode->i_mapping->a_ops = &ext4_da_aops;
3885 else if (ext4_should_writeback_data(inode))
3886 inode->i_mapping->a_ops = &ext4_writeback_aops;
3887 else
3888 inode->i_mapping->a_ops = &ext4_journalled_aops;
3889 }
3890
3891 /*
3892 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3893 * up to the end of the block which corresponds to `from'.
3894 * This required during truncate. We need to physically zero the tail end
3895 * of that block so it doesn't yield old data if the file is later grown.
3896 */
3897 int ext4_block_truncate_page(handle_t *handle,
3898 struct address_space *mapping, loff_t from)
3899 {
3900 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3901 unsigned offset = from & (PAGE_CACHE_SIZE-1);
3902 unsigned blocksize, length, pos;
3903 ext4_lblk_t iblock;
3904 struct inode *inode = mapping->host;
3905 struct buffer_head *bh;
3906 struct page *page;
3907 int err = 0;
3908
3909 page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
3910 mapping_gfp_mask(mapping) & ~__GFP_FS);
3911 if (!page)
3912 return -EINVAL;
3913
3914 blocksize = inode->i_sb->s_blocksize;
3915 length = blocksize - (offset & (blocksize - 1));
3916 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3917
3918 /*
3919 * For "nobh" option, we can only work if we don't need to
3920 * read-in the page - otherwise we create buffers to do the IO.
3921 */
3922 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
3923 ext4_should_writeback_data(inode) && PageUptodate(page)) {
3924 zero_user(page, offset, length);
3925 set_page_dirty(page);
3926 goto unlock;
3927 }
3928
3929 if (!page_has_buffers(page))
3930 create_empty_buffers(page, blocksize, 0);
3931
3932 /* Find the buffer that contains "offset" */
3933 bh = page_buffers(page);
3934 pos = blocksize;
3935 while (offset >= pos) {
3936 bh = bh->b_this_page;
3937 iblock++;
3938 pos += blocksize;
3939 }
3940
3941 err = 0;
3942 if (buffer_freed(bh)) {
3943 BUFFER_TRACE(bh, "freed: skip");
3944 goto unlock;
3945 }
3946
3947 if (!buffer_mapped(bh)) {
3948 BUFFER_TRACE(bh, "unmapped");
3949 ext4_get_block(inode, iblock, bh, 0);
3950 /* unmapped? It's a hole - nothing to do */
3951 if (!buffer_mapped(bh)) {
3952 BUFFER_TRACE(bh, "still unmapped");
3953 goto unlock;
3954 }
3955 }
3956
3957 /* Ok, it's mapped. Make sure it's up-to-date */
3958 if (PageUptodate(page))
3959 set_buffer_uptodate(bh);
3960
3961 if (!buffer_uptodate(bh)) {
3962 err = -EIO;
3963 ll_rw_block(READ, 1, &bh);
3964 wait_on_buffer(bh);
3965 /* Uhhuh. Read error. Complain and punt. */
3966 if (!buffer_uptodate(bh))
3967 goto unlock;
3968 }
3969
3970 if (ext4_should_journal_data(inode)) {
3971 BUFFER_TRACE(bh, "get write access");
3972 err = ext4_journal_get_write_access(handle, bh);
3973 if (err)
3974 goto unlock;
3975 }
3976
3977 zero_user(page, offset, length);
3978
3979 BUFFER_TRACE(bh, "zeroed end of block");
3980
3981 err = 0;
3982 if (ext4_should_journal_data(inode)) {
3983 err = ext4_handle_dirty_metadata(handle, inode, bh);
3984 } else {
3985 if (ext4_should_order_data(inode))
3986 err = ext4_jbd2_file_inode(handle, inode);
3987 mark_buffer_dirty(bh);
3988 }
3989
3990 unlock:
3991 unlock_page(page);
3992 page_cache_release(page);
3993 return err;
3994 }
3995
3996 /*
3997 * Probably it should be a library function... search for first non-zero word
3998 * or memcmp with zero_page, whatever is better for particular architecture.
3999 * Linus?
4000 */
4001 static inline int all_zeroes(__le32 *p, __le32 *q)
4002 {
4003 while (p < q)
4004 if (*p++)
4005 return 0;
4006 return 1;
4007 }
4008
4009 /**
4010 * ext4_find_shared - find the indirect blocks for partial truncation.
4011 * @inode: inode in question
4012 * @depth: depth of the affected branch
4013 * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
4014 * @chain: place to store the pointers to partial indirect blocks
4015 * @top: place to the (detached) top of branch
4016 *
4017 * This is a helper function used by ext4_truncate().
4018 *
4019 * When we do truncate() we may have to clean the ends of several
4020 * indirect blocks but leave the blocks themselves alive. Block is
4021 * partially truncated if some data below the new i_size is refered
4022 * from it (and it is on the path to the first completely truncated
4023 * data block, indeed). We have to free the top of that path along
4024 * with everything to the right of the path. Since no allocation
4025 * past the truncation point is possible until ext4_truncate()
4026 * finishes, we may safely do the latter, but top of branch may
4027 * require special attention - pageout below the truncation point
4028 * might try to populate it.
4029 *
4030 * We atomically detach the top of branch from the tree, store the
4031 * block number of its root in *@top, pointers to buffer_heads of
4032 * partially truncated blocks - in @chain[].bh and pointers to
4033 * their last elements that should not be removed - in
4034 * @chain[].p. Return value is the pointer to last filled element
4035 * of @chain.
4036 *
4037 * The work left to caller to do the actual freeing of subtrees:
4038 * a) free the subtree starting from *@top
4039 * b) free the subtrees whose roots are stored in
4040 * (@chain[i].p+1 .. end of @chain[i].bh->b_data)
4041 * c) free the subtrees growing from the inode past the @chain[0].
4042 * (no partially truncated stuff there). */
4043
4044 static Indirect *ext4_find_shared(struct inode *inode, int depth,
4045 ext4_lblk_t offsets[4], Indirect chain[4],
4046 __le32 *top)
4047 {
4048 Indirect *partial, *p;
4049 int k, err;
4050
4051 *top = 0;
4052 /* Make k index the deepest non-null offset + 1 */
4053 for (k = depth; k > 1 && !offsets[k-1]; k--)
4054 ;
4055 partial = ext4_get_branch(inode, k, offsets, chain, &err);
4056 /* Writer: pointers */
4057 if (!partial)
4058 partial = chain + k-1;
4059 /*
4060 * If the branch acquired continuation since we've looked at it -
4061 * fine, it should all survive and (new) top doesn't belong to us.
4062 */
4063 if (!partial->key && *partial->p)
4064 /* Writer: end */
4065 goto no_top;
4066 for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
4067 ;
4068 /*
4069 * OK, we've found the last block that must survive. The rest of our
4070 * branch should be detached before unlocking. However, if that rest
4071 * of branch is all ours and does not grow immediately from the inode
4072 * it's easier to cheat and just decrement partial->p.
4073 */
4074 if (p == chain + k - 1 && p > chain) {
4075 p->p--;
4076 } else {
4077 *top = *p->p;
4078 /* Nope, don't do this in ext4. Must leave the tree intact */
4079 #if 0
4080 *p->p = 0;
4081 #endif
4082 }
4083 /* Writer: end */
4084
4085 while (partial > p) {
4086 brelse(partial->bh);
4087 partial--;
4088 }
4089 no_top:
4090 return partial;
4091 }
4092
4093 /*
4094 * Zero a number of block pointers in either an inode or an indirect block.
4095 * If we restart the transaction we must again get write access to the
4096 * indirect block for further modification.
4097 *
4098 * We release `count' blocks on disk, but (last - first) may be greater
4099 * than `count' because there can be holes in there.
4100 */
4101 static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
4102 struct buffer_head *bh,
4103 ext4_fsblk_t block_to_free,
4104 unsigned long count, __le32 *first,
4105 __le32 *last)
4106 {
4107 __le32 *p;
4108 int flags = EXT4_FREE_BLOCKS_FORGET;
4109
4110 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
4111 flags |= EXT4_FREE_BLOCKS_METADATA;
4112
4113 if (try_to_extend_transaction(handle, inode)) {
4114 if (bh) {
4115 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4116 ext4_handle_dirty_metadata(handle, inode, bh);
4117 }
4118 ext4_mark_inode_dirty(handle, inode);
4119 ext4_truncate_restart_trans(handle, inode,
4120 blocks_for_truncate(inode));
4121 if (bh) {
4122 BUFFER_TRACE(bh, "retaking write access");
4123 ext4_journal_get_write_access(handle, bh);
4124 }
4125 }
4126
4127 for (p = first; p < last; p++)
4128 *p = 0;
4129
4130 ext4_free_blocks(handle, inode, 0, block_to_free, count, flags);
4131 }
4132
4133 /**
4134 * ext4_free_data - free a list of data blocks
4135 * @handle: handle for this transaction
4136 * @inode: inode we are dealing with
4137 * @this_bh: indirect buffer_head which contains *@first and *@last
4138 * @first: array of block numbers
4139 * @last: points immediately past the end of array
4140 *
4141 * We are freeing all blocks refered from that array (numbers are stored as
4142 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
4143 *
4144 * We accumulate contiguous runs of blocks to free. Conveniently, if these
4145 * blocks are contiguous then releasing them at one time will only affect one
4146 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
4147 * actually use a lot of journal space.
4148 *
4149 * @this_bh will be %NULL if @first and @last point into the inode's direct
4150 * block pointers.
4151 */
4152 static void ext4_free_data(handle_t *handle, struct inode *inode,
4153 struct buffer_head *this_bh,
4154 __le32 *first, __le32 *last)
4155 {
4156 ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */
4157 unsigned long count = 0; /* Number of blocks in the run */
4158 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
4159 corresponding to
4160 block_to_free */
4161 ext4_fsblk_t nr; /* Current block # */
4162 __le32 *p; /* Pointer into inode/ind
4163 for current block */
4164 int err;
4165
4166 if (this_bh) { /* For indirect block */
4167 BUFFER_TRACE(this_bh, "get_write_access");
4168 err = ext4_journal_get_write_access(handle, this_bh);
4169 /* Important: if we can't update the indirect pointers
4170 * to the blocks, we can't free them. */
4171 if (err)
4172 return;
4173 }
4174
4175 for (p = first; p < last; p++) {
4176 nr = le32_to_cpu(*p);
4177 if (nr) {
4178 /* accumulate blocks to free if they're contiguous */
4179 if (count == 0) {
4180 block_to_free = nr;
4181 block_to_free_p = p;
4182 count = 1;
4183 } else if (nr == block_to_free + count) {
4184 count++;
4185 } else {
4186 ext4_clear_blocks(handle, inode, this_bh,
4187 block_to_free,
4188 count, block_to_free_p, p);
4189 block_to_free = nr;
4190 block_to_free_p = p;
4191 count = 1;
4192 }
4193 }
4194 }
4195
4196 if (count > 0)
4197 ext4_clear_blocks(handle, inode, this_bh, block_to_free,
4198 count, block_to_free_p, p);
4199
4200 if (this_bh) {
4201 BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
4202
4203 /*
4204 * The buffer head should have an attached journal head at this
4205 * point. However, if the data is corrupted and an indirect
4206 * block pointed to itself, it would have been detached when
4207 * the block was cleared. Check for this instead of OOPSing.
4208 */
4209 if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
4210 ext4_handle_dirty_metadata(handle, inode, this_bh);
4211 else
4212 ext4_error(inode->i_sb, __func__,
4213 "circular indirect block detected, "
4214 "inode=%lu, block=%llu",
4215 inode->i_ino,
4216 (unsigned long long) this_bh->b_blocknr);
4217 }
4218 }
4219
4220 /**
4221 * ext4_free_branches - free an array of branches
4222 * @handle: JBD handle for this transaction
4223 * @inode: inode we are dealing with
4224 * @parent_bh: the buffer_head which contains *@first and *@last
4225 * @first: array of block numbers
4226 * @last: pointer immediately past the end of array
4227 * @depth: depth of the branches to free
4228 *
4229 * We are freeing all blocks refered from these branches (numbers are
4230 * stored as little-endian 32-bit) and updating @inode->i_blocks
4231 * appropriately.
4232 */
4233 static void ext4_free_branches(handle_t *handle, struct inode *inode,
4234 struct buffer_head *parent_bh,
4235 __le32 *first, __le32 *last, int depth)
4236 {
4237 ext4_fsblk_t nr;
4238 __le32 *p;
4239
4240 if (ext4_handle_is_aborted(handle))
4241 return;
4242
4243 if (depth--) {
4244 struct buffer_head *bh;
4245 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
4246 p = last;
4247 while (--p >= first) {
4248 nr = le32_to_cpu(*p);
4249 if (!nr)
4250 continue; /* A hole */
4251
4252 /* Go read the buffer for the next level down */
4253 bh = sb_bread(inode->i_sb, nr);
4254
4255 /*
4256 * A read failure? Report error and clear slot
4257 * (should be rare).
4258 */
4259 if (!bh) {
4260 ext4_error(inode->i_sb, "ext4_free_branches",
4261 "Read failure, inode=%lu, block=%llu",
4262 inode->i_ino, nr);
4263 continue;
4264 }
4265
4266 /* This zaps the entire block. Bottom up. */
4267 BUFFER_TRACE(bh, "free child branches");
4268 ext4_free_branches(handle, inode, bh,
4269 (__le32 *) bh->b_data,
4270 (__le32 *) bh->b_data + addr_per_block,
4271 depth);
4272
4273 /*
4274 * We've probably journalled the indirect block several
4275 * times during the truncate. But it's no longer
4276 * needed and we now drop it from the transaction via
4277 * jbd2_journal_revoke().
4278 *
4279 * That's easy if it's exclusively part of this
4280 * transaction. But if it's part of the committing
4281 * transaction then jbd2_journal_forget() will simply
4282 * brelse() it. That means that if the underlying
4283 * block is reallocated in ext4_get_block(),
4284 * unmap_underlying_metadata() will find this block
4285 * and will try to get rid of it. damn, damn.
4286 *
4287 * If this block has already been committed to the
4288 * journal, a revoke record will be written. And
4289 * revoke records must be emitted *before* clearing
4290 * this block's bit in the bitmaps.
4291 */
4292 ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
4293
4294 /*
4295 * Everything below this this pointer has been
4296 * released. Now let this top-of-subtree go.
4297 *
4298 * We want the freeing of this indirect block to be
4299 * atomic in the journal with the updating of the
4300 * bitmap block which owns it. So make some room in
4301 * the journal.
4302 *
4303 * We zero the parent pointer *after* freeing its
4304 * pointee in the bitmaps, so if extend_transaction()
4305 * for some reason fails to put the bitmap changes and
4306 * the release into the same transaction, recovery
4307 * will merely complain about releasing a free block,
4308 * rather than leaking blocks.
4309 */
4310 if (ext4_handle_is_aborted(handle))
4311 return;
4312 if (try_to_extend_transaction(handle, inode)) {
4313 ext4_mark_inode_dirty(handle, inode);
4314 ext4_truncate_restart_trans(handle, inode,
4315 blocks_for_truncate(inode));
4316 }
4317
4318 ext4_free_blocks(handle, inode, 0, nr, 1,
4319 EXT4_FREE_BLOCKS_METADATA);
4320
4321 if (parent_bh) {
4322 /*
4323 * The block which we have just freed is
4324 * pointed to by an indirect block: journal it
4325 */
4326 BUFFER_TRACE(parent_bh, "get_write_access");
4327 if (!ext4_journal_get_write_access(handle,
4328 parent_bh)){
4329 *p = 0;
4330 BUFFER_TRACE(parent_bh,
4331 "call ext4_handle_dirty_metadata");
4332 ext4_handle_dirty_metadata(handle,
4333 inode,
4334 parent_bh);
4335 }
4336 }
4337 }
4338 } else {
4339 /* We have reached the bottom of the tree. */
4340 BUFFER_TRACE(parent_bh, "free data blocks");
4341 ext4_free_data(handle, inode, parent_bh, first, last);
4342 }
4343 }
4344
4345 int ext4_can_truncate(struct inode *inode)
4346 {
4347 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4348 return 0;
4349 if (S_ISREG(inode->i_mode))
4350 return 1;
4351 if (S_ISDIR(inode->i_mode))
4352 return 1;
4353 if (S_ISLNK(inode->i_mode))
4354 return !ext4_inode_is_fast_symlink(inode);
4355 return 0;
4356 }
4357
4358 /*
4359 * ext4_truncate()
4360 *
4361 * We block out ext4_get_block() block instantiations across the entire
4362 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
4363 * simultaneously on behalf of the same inode.
4364 *
4365 * As we work through the truncate and commmit bits of it to the journal there
4366 * is one core, guiding principle: the file's tree must always be consistent on
4367 * disk. We must be able to restart the truncate after a crash.
4368 *
4369 * The file's tree may be transiently inconsistent in memory (although it
4370 * probably isn't), but whenever we close off and commit a journal transaction,
4371 * the contents of (the filesystem + the journal) must be consistent and
4372 * restartable. It's pretty simple, really: bottom up, right to left (although
4373 * left-to-right works OK too).
4374 *
4375 * Note that at recovery time, journal replay occurs *before* the restart of
4376 * truncate against the orphan inode list.
4377 *
4378 * The committed inode has the new, desired i_size (which is the same as
4379 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
4380 * that this inode's truncate did not complete and it will again call
4381 * ext4_truncate() to have another go. So there will be instantiated blocks
4382 * to the right of the truncation point in a crashed ext4 filesystem. But
4383 * that's fine - as long as they are linked from the inode, the post-crash
4384 * ext4_truncate() run will find them and release them.
4385 */
4386 void ext4_truncate(struct inode *inode)
4387 {
4388 handle_t *handle;
4389 struct ext4_inode_info *ei = EXT4_I(inode);
4390 __le32 *i_data = ei->i_data;
4391 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
4392 struct address_space *mapping = inode->i_mapping;
4393 ext4_lblk_t offsets[4];
4394 Indirect chain[4];
4395 Indirect *partial;
4396 __le32 nr = 0;
4397 int n;
4398 ext4_lblk_t last_block;
4399 unsigned blocksize = inode->i_sb->s_blocksize;
4400
4401 if (!ext4_can_truncate(inode))
4402 return;
4403
4404 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4405 ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE;
4406
4407 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
4408 ext4_ext_truncate(inode);
4409 return;
4410 }
4411
4412 handle = start_transaction(inode);
4413 if (IS_ERR(handle))
4414 return; /* AKPM: return what? */
4415
4416 last_block = (inode->i_size + blocksize-1)
4417 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
4418
4419 if (inode->i_size & (blocksize - 1))
4420 if (ext4_block_truncate_page(handle, mapping, inode->i_size))
4421 goto out_stop;
4422
4423 n = ext4_block_to_path(inode, last_block, offsets, NULL);
4424 if (n == 0)
4425 goto out_stop; /* error */
4426
4427 /*
4428 * OK. This truncate is going to happen. We add the inode to the
4429 * orphan list, so that if this truncate spans multiple transactions,
4430 * and we crash, we will resume the truncate when the filesystem
4431 * recovers. It also marks the inode dirty, to catch the new size.
4432 *
4433 * Implication: the file must always be in a sane, consistent
4434 * truncatable state while each transaction commits.
4435 */
4436 if (ext4_orphan_add(handle, inode))
4437 goto out_stop;
4438
4439 /*
4440 * From here we block out all ext4_get_block() callers who want to
4441 * modify the block allocation tree.
4442 */
4443 down_write(&ei->i_data_sem);
4444
4445 ext4_discard_preallocations(inode);
4446
4447 /*
4448 * The orphan list entry will now protect us from any crash which
4449 * occurs before the truncate completes, so it is now safe to propagate
4450 * the new, shorter inode size (held for now in i_size) into the
4451 * on-disk inode. We do this via i_disksize, which is the value which
4452 * ext4 *really* writes onto the disk inode.
4453 */
4454 ei->i_disksize = inode->i_size;
4455
4456 if (n == 1) { /* direct blocks */
4457 ext4_free_data(handle, inode, NULL, i_data+offsets[0],
4458 i_data + EXT4_NDIR_BLOCKS);
4459 goto do_indirects;
4460 }
4461
4462 partial = ext4_find_shared(inode, n, offsets, chain, &nr);
4463 /* Kill the top of shared branch (not detached) */
4464 if (nr) {
4465 if (partial == chain) {
4466 /* Shared branch grows from the inode */
4467 ext4_free_branches(handle, inode, NULL,
4468 &nr, &nr+1, (chain+n-1) - partial);
4469 *partial->p = 0;
4470 /*
4471 * We mark the inode dirty prior to restart,
4472 * and prior to stop. No need for it here.
4473 */
4474 } else {
4475 /* Shared branch grows from an indirect block */
4476 BUFFER_TRACE(partial->bh, "get_write_access");
4477 ext4_free_branches(handle, inode, partial->bh,
4478 partial->p,
4479 partial->p+1, (chain+n-1) - partial);
4480 }
4481 }
4482 /* Clear the ends of indirect blocks on the shared branch */
4483 while (partial > chain) {
4484 ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
4485 (__le32*)partial->bh->b_data+addr_per_block,
4486 (chain+n-1) - partial);
4487 BUFFER_TRACE(partial->bh, "call brelse");
4488 brelse(partial->bh);
4489 partial--;
4490 }
4491 do_indirects:
4492 /* Kill the remaining (whole) subtrees */
4493 switch (offsets[0]) {
4494 default:
4495 nr = i_data[EXT4_IND_BLOCK];
4496 if (nr) {
4497 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
4498 i_data[EXT4_IND_BLOCK] = 0;
4499 }
4500 case EXT4_IND_BLOCK:
4501 nr = i_data[EXT4_DIND_BLOCK];
4502 if (nr) {
4503 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
4504 i_data[EXT4_DIND_BLOCK] = 0;
4505 }
4506 case EXT4_DIND_BLOCK:
4507 nr = i_data[EXT4_TIND_BLOCK];
4508 if (nr) {
4509 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
4510 i_data[EXT4_TIND_BLOCK] = 0;
4511 }
4512 case EXT4_TIND_BLOCK:
4513 ;
4514 }
4515
4516 up_write(&ei->i_data_sem);
4517 inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4518 ext4_mark_inode_dirty(handle, inode);
4519
4520 /*
4521 * In a multi-transaction truncate, we only make the final transaction
4522 * synchronous
4523 */
4524 if (IS_SYNC(inode))
4525 ext4_handle_sync(handle);
4526 out_stop:
4527 /*
4528 * If this was a simple ftruncate(), and the file will remain alive
4529 * then we need to clear up the orphan record which we created above.
4530 * However, if this was a real unlink then we were called by
4531 * ext4_delete_inode(), and we allow that function to clean up the
4532 * orphan info for us.
4533 */
4534 if (inode->i_nlink)
4535 ext4_orphan_del(handle, inode);
4536
4537 ext4_journal_stop(handle);
4538 }
4539
4540 /*
4541 * ext4_get_inode_loc returns with an extra refcount against the inode's
4542 * underlying buffer_head on success. If 'in_mem' is true, we have all
4543 * data in memory that is needed to recreate the on-disk version of this
4544 * inode.
4545 */
4546 static int __ext4_get_inode_loc(struct inode *inode,
4547 struct ext4_iloc *iloc, int in_mem)
4548 {
4549 struct ext4_group_desc *gdp;
4550 struct buffer_head *bh;
4551 struct super_block *sb = inode->i_sb;
4552 ext4_fsblk_t block;
4553 int inodes_per_block, inode_offset;
4554
4555 iloc->bh = NULL;
4556 if (!ext4_valid_inum(sb, inode->i_ino))
4557 return -EIO;
4558
4559 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
4560 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
4561 if (!gdp)
4562 return -EIO;
4563
4564 /*
4565 * Figure out the offset within the block group inode table
4566 */
4567 inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb));
4568 inode_offset = ((inode->i_ino - 1) %
4569 EXT4_INODES_PER_GROUP(sb));
4570 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
4571 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
4572
4573 bh = sb_getblk(sb, block);
4574 if (!bh) {
4575 ext4_error(sb, "ext4_get_inode_loc", "unable to read "
4576 "inode block - inode=%lu, block=%llu",
4577 inode->i_ino, block);
4578 return -EIO;
4579 }
4580 if (!buffer_uptodate(bh)) {
4581 lock_buffer(bh);
4582
4583 /*
4584 * If the buffer has the write error flag, we have failed
4585 * to write out another inode in the same block. In this
4586 * case, we don't have to read the block because we may
4587 * read the old inode data successfully.
4588 */
4589 if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
4590 set_buffer_uptodate(bh);
4591
4592 if (buffer_uptodate(bh)) {
4593 /* someone brought it uptodate while we waited */
4594 unlock_buffer(bh);
4595 goto has_buffer;
4596 }
4597
4598 /*
4599 * If we have all information of the inode in memory and this
4600 * is the only valid inode in the block, we need not read the
4601 * block.
4602 */
4603 if (in_mem) {
4604 struct buffer_head *bitmap_bh;
4605 int i, start;
4606
4607 start = inode_offset & ~(inodes_per_block - 1);
4608
4609 /* Is the inode bitmap in cache? */
4610 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
4611 if (!bitmap_bh)
4612 goto make_io;
4613
4614 /*
4615 * If the inode bitmap isn't in cache then the
4616 * optimisation may end up performing two reads instead
4617 * of one, so skip it.
4618 */
4619 if (!buffer_uptodate(bitmap_bh)) {
4620 brelse(bitmap_bh);
4621 goto make_io;
4622 }
4623 for (i = start; i < start + inodes_per_block; i++) {
4624 if (i == inode_offset)
4625 continue;
4626 if (ext4_test_bit(i, bitmap_bh->b_data))
4627 break;
4628 }
4629 brelse(bitmap_bh);
4630 if (i == start + inodes_per_block) {
4631 /* all other inodes are free, so skip I/O */
4632 memset(bh->b_data, 0, bh->b_size);
4633 set_buffer_uptodate(bh);
4634 unlock_buffer(bh);
4635 goto has_buffer;
4636 }
4637 }
4638
4639 make_io:
4640 /*
4641 * If we need to do any I/O, try to pre-readahead extra
4642 * blocks from the inode table.
4643 */
4644 if (EXT4_SB(sb)->s_inode_readahead_blks) {
4645 ext4_fsblk_t b, end, table;
4646 unsigned num;
4647
4648 table = ext4_inode_table(sb, gdp);
4649 /* s_inode_readahead_blks is always a power of 2 */
4650 b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
4651 if (table > b)
4652 b = table;
4653 end = b + EXT4_SB(sb)->s_inode_readahead_blks;
4654 num = EXT4_INODES_PER_GROUP(sb);
4655 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4656 EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
4657 num -= ext4_itable_unused_count(sb, gdp);
4658 table += num / inodes_per_block;
4659 if (end > table)
4660 end = table;
4661 while (b <= end)
4662 sb_breadahead(sb, b++);
4663 }
4664
4665 /*
4666 * There are other valid inodes in the buffer, this inode
4667 * has in-inode xattrs, or we don't have this inode in memory.
4668 * Read the block from disk.
4669 */
4670 get_bh(bh);
4671 bh->b_end_io = end_buffer_read_sync;
4672 submit_bh(READ_META, bh);
4673 wait_on_buffer(bh);
4674 if (!buffer_uptodate(bh)) {
4675 ext4_error(sb, __func__,
4676 "unable to read inode block - inode=%lu, "
4677 "block=%llu", inode->i_ino, block);
4678 brelse(bh);
4679 return -EIO;
4680 }
4681 }
4682 has_buffer:
4683 iloc->bh = bh;
4684 return 0;
4685 }
4686
4687 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4688 {
4689 /* We have all inode data except xattrs in memory here. */
4690 return __ext4_get_inode_loc(inode, iloc,
4691 !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
4692 }
4693
4694 void ext4_set_inode_flags(struct inode *inode)
4695 {
4696 unsigned int flags = EXT4_I(inode)->i_flags;
4697
4698 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
4699 if (flags & EXT4_SYNC_FL)
4700 inode->i_flags |= S_SYNC;
4701 if (flags & EXT4_APPEND_FL)
4702 inode->i_flags |= S_APPEND;
4703 if (flags & EXT4_IMMUTABLE_FL)
4704 inode->i_flags |= S_IMMUTABLE;
4705 if (flags & EXT4_NOATIME_FL)
4706 inode->i_flags |= S_NOATIME;
4707 if (flags & EXT4_DIRSYNC_FL)
4708 inode->i_flags |= S_DIRSYNC;
4709 }
4710
4711 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
4712 void ext4_get_inode_flags(struct ext4_inode_info *ei)
4713 {
4714 unsigned int flags = ei->vfs_inode.i_flags;
4715
4716 ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
4717 EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
4718 if (flags & S_SYNC)
4719 ei->i_flags |= EXT4_SYNC_FL;
4720 if (flags & S_APPEND)
4721 ei->i_flags |= EXT4_APPEND_FL;
4722 if (flags & S_IMMUTABLE)
4723 ei->i_flags |= EXT4_IMMUTABLE_FL;
4724 if (flags & S_NOATIME)
4725 ei->i_flags |= EXT4_NOATIME_FL;
4726 if (flags & S_DIRSYNC)
4727 ei->i_flags |= EXT4_DIRSYNC_FL;
4728 }
4729
4730 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
4731 struct ext4_inode_info *ei)
4732 {
4733 blkcnt_t i_blocks ;
4734 struct inode *inode = &(ei->vfs_inode);
4735 struct super_block *sb = inode->i_sb;
4736
4737 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4738 EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
4739 /* we are using combined 48 bit field */
4740 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4741 le32_to_cpu(raw_inode->i_blocks_lo);
4742 if (ei->i_flags & EXT4_HUGE_FILE_FL) {
4743 /* i_blocks represent file system block size */
4744 return i_blocks << (inode->i_blkbits - 9);
4745 } else {
4746 return i_blocks;
4747 }
4748 } else {
4749 return le32_to_cpu(raw_inode->i_blocks_lo);
4750 }
4751 }
4752
4753 struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4754 {
4755 struct ext4_iloc iloc;
4756 struct ext4_inode *raw_inode;
4757 struct ext4_inode_info *ei;
4758 struct inode *inode;
4759 journal_t *journal = EXT4_SB(sb)->s_journal;
4760 long ret;
4761 int block;
4762
4763 inode = iget_locked(sb, ino);
4764 if (!inode)
4765 return ERR_PTR(-ENOMEM);
4766 if (!(inode->i_state & I_NEW))
4767 return inode;
4768
4769 ei = EXT4_I(inode);
4770 iloc.bh = 0;
4771
4772 ret = __ext4_get_inode_loc(inode, &iloc, 0);
4773 if (ret < 0)
4774 goto bad_inode;
4775 raw_inode = ext4_raw_inode(&iloc);
4776 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4777 inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4778 inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4779 if (!(test_opt(inode->i_sb, NO_UID32))) {
4780 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4781 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4782 }
4783 inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
4784
4785 ei->i_state = 0;
4786 ei->i_dir_start_lookup = 0;
4787 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4788 /* We now have enough fields to check if the inode was active or not.
4789 * This is needed because nfsd might try to access dead inodes
4790 * the test is that same one that e2fsck uses
4791 * NeilBrown 1999oct15
4792 */
4793 if (inode->i_nlink == 0) {
4794 if (inode->i_mode == 0 ||
4795 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
4796 /* this inode is deleted */
4797 ret = -ESTALE;
4798 goto bad_inode;
4799 }
4800 /* The only unlinked inodes we let through here have
4801 * valid i_mode and are being read by the orphan
4802 * recovery code: that's fine, we're about to complete
4803 * the process of deleting those. */
4804 }
4805 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4806 inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4807 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4808 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
4809 ei->i_file_acl |=
4810 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4811 inode->i_size = ext4_isize(raw_inode);
4812 ei->i_disksize = inode->i_size;
4813 #ifdef CONFIG_QUOTA
4814 ei->i_reserved_quota = 0;
4815 #endif
4816 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4817 ei->i_block_group = iloc.block_group;
4818 ei->i_last_alloc_group = ~0;
4819 /*
4820 * NOTE! The in-memory inode i_data array is in little-endian order
4821 * even on big-endian machines: we do NOT byteswap the block numbers!
4822 */
4823 for (block = 0; block < EXT4_N_BLOCKS; block++)
4824 ei->i_data[block] = raw_inode->i_block[block];
4825 INIT_LIST_HEAD(&ei->i_orphan);
4826
4827 /*
4828 * Set transaction id's of transactions that have to be committed
4829 * to finish f[data]sync. We set them to currently running transaction
4830 * as we cannot be sure that the inode or some of its metadata isn't
4831 * part of the transaction - the inode could have been reclaimed and
4832 * now it is reread from disk.
4833 */
4834 if (journal) {
4835 transaction_t *transaction;
4836 tid_t tid;
4837
4838 spin_lock(&journal->j_state_lock);
4839 if (journal->j_running_transaction)
4840 transaction = journal->j_running_transaction;
4841 else
4842 transaction = journal->j_committing_transaction;
4843 if (transaction)
4844 tid = transaction->t_tid;
4845 else
4846 tid = journal->j_commit_sequence;
4847 spin_unlock(&journal->j_state_lock);
4848 ei->i_sync_tid = tid;
4849 ei->i_datasync_tid = tid;
4850 }
4851
4852 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4853 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4854 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4855 EXT4_INODE_SIZE(inode->i_sb)) {
4856 ret = -EIO;
4857 goto bad_inode;
4858 }
4859 if (ei->i_extra_isize == 0) {
4860 /* The extra space is currently unused. Use it. */
4861 ei->i_extra_isize = sizeof(struct ext4_inode) -
4862 EXT4_GOOD_OLD_INODE_SIZE;
4863 } else {
4864 __le32 *magic = (void *)raw_inode +
4865 EXT4_GOOD_OLD_INODE_SIZE +
4866 ei->i_extra_isize;
4867 if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
4868 ei->i_state |= EXT4_STATE_XATTR;
4869 }
4870 } else
4871 ei->i_extra_isize = 0;
4872
4873 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
4874 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
4875 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
4876 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
4877
4878 inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
4879 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4880 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4881 inode->i_version |=
4882 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
4883 }
4884
4885 ret = 0;
4886 if (ei->i_file_acl &&
4887 !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
4888 ext4_error(sb, __func__,
4889 "bad extended attribute block %llu in inode #%lu",
4890 ei->i_file_acl, inode->i_ino);
4891 ret = -EIO;
4892 goto bad_inode;
4893 } else if (ei->i_flags & EXT4_EXTENTS_FL) {
4894 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4895 (S_ISLNK(inode->i_mode) &&
4896 !ext4_inode_is_fast_symlink(inode)))
4897 /* Validate extent which is part of inode */
4898 ret = ext4_ext_check_inode(inode);
4899 } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4900 (S_ISLNK(inode->i_mode) &&
4901 !ext4_inode_is_fast_symlink(inode))) {
4902 /* Validate block references which are part of inode */
4903 ret = ext4_check_inode_blockref(inode);
4904 }
4905 if (ret)
4906 goto bad_inode;
4907
4908 if (S_ISREG(inode->i_mode)) {
4909 inode->i_op = &ext4_file_inode_operations;
4910 inode->i_fop = &ext4_file_operations;
4911 ext4_set_aops(inode);
4912 } else if (S_ISDIR(inode->i_mode)) {
4913 inode->i_op = &ext4_dir_inode_operations;
4914 inode->i_fop = &ext4_dir_operations;
4915 } else if (S_ISLNK(inode->i_mode)) {
4916 if (ext4_inode_is_fast_symlink(inode)) {
4917 inode->i_op = &ext4_fast_symlink_inode_operations;
4918 nd_terminate_link(ei->i_data, inode->i_size,
4919 sizeof(ei->i_data) - 1);
4920 } else {
4921 inode->i_op = &ext4_symlink_inode_operations;
4922 ext4_set_aops(inode);
4923 }
4924 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
4925 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
4926 inode->i_op = &ext4_special_inode_operations;
4927 if (raw_inode->i_block[0])
4928 init_special_inode(inode, inode->i_mode,
4929 old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
4930 else
4931 init_special_inode(inode, inode->i_mode,
4932 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
4933 } else {
4934 ret = -EIO;
4935 ext4_error(inode->i_sb, __func__,
4936 "bogus i_mode (%o) for inode=%lu",
4937 inode->i_mode, inode->i_ino);
4938 goto bad_inode;
4939 }
4940 brelse(iloc.bh);
4941 ext4_set_inode_flags(inode);
4942 unlock_new_inode(inode);
4943 return inode;
4944
4945 bad_inode:
4946 brelse(iloc.bh);
4947 iget_failed(inode);
4948 return ERR_PTR(ret);
4949 }
4950
4951 static int ext4_inode_blocks_set(handle_t *handle,
4952 struct ext4_inode *raw_inode,
4953 struct ext4_inode_info *ei)
4954 {
4955 struct inode *inode = &(ei->vfs_inode);
4956 u64 i_blocks = inode->i_blocks;
4957 struct super_block *sb = inode->i_sb;
4958
4959 if (i_blocks <= ~0U) {
4960 /*
4961 * i_blocks can be represnted in a 32 bit variable
4962 * as multiple of 512 bytes
4963 */
4964 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4965 raw_inode->i_blocks_high = 0;
4966 ei->i_flags &= ~EXT4_HUGE_FILE_FL;
4967 return 0;
4968 }
4969 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
4970 return -EFBIG;
4971
4972 if (i_blocks <= 0xffffffffffffULL) {
4973 /*
4974 * i_blocks can be represented in a 48 bit variable
4975 * as multiple of 512 bytes
4976 */
4977 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4978 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4979 ei->i_flags &= ~EXT4_HUGE_FILE_FL;
4980 } else {
4981 ei->i_flags |= EXT4_HUGE_FILE_FL;
4982 /* i_block is stored in file system block size */
4983 i_blocks = i_blocks >> (inode->i_blkbits - 9);
4984 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4985 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4986 }
4987 return 0;
4988 }
4989
4990 /*
4991 * Post the struct inode info into an on-disk inode location in the
4992 * buffer-cache. This gobbles the caller's reference to the
4993 * buffer_head in the inode location struct.
4994 *
4995 * The caller must have write access to iloc->bh.
4996 */
4997 static int ext4_do_update_inode(handle_t *handle,
4998 struct inode *inode,
4999 struct ext4_iloc *iloc)
5000 {
5001 struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
5002 struct ext4_inode_info *ei = EXT4_I(inode);
5003 struct buffer_head *bh = iloc->bh;
5004 int err = 0, rc, block;
5005
5006 /* For fields not not tracking in the in-memory inode,
5007 * initialise them to zero for new inodes. */
5008 if (ei->i_state & EXT4_STATE_NEW)
5009 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
5010
5011 ext4_get_inode_flags(ei);
5012 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
5013 if (!(test_opt(inode->i_sb, NO_UID32))) {
5014 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
5015 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
5016 /*
5017 * Fix up interoperability with old kernels. Otherwise, old inodes get
5018 * re-used with the upper 16 bits of the uid/gid intact
5019 */
5020 if (!ei->i_dtime) {
5021 raw_inode->i_uid_high =
5022 cpu_to_le16(high_16_bits(inode->i_uid));
5023 raw_inode->i_gid_high =
5024 cpu_to_le16(high_16_bits(inode->i_gid));
5025 } else {
5026 raw_inode->i_uid_high = 0;
5027 raw_inode->i_gid_high = 0;
5028 }
5029 } else {
5030 raw_inode->i_uid_low =
5031 cpu_to_le16(fs_high2lowuid(inode->i_uid));
5032 raw_inode->i_gid_low =
5033 cpu_to_le16(fs_high2lowgid(inode->i_gid));
5034 raw_inode->i_uid_high = 0;
5035 raw_inode->i_gid_high = 0;
5036 }
5037 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
5038
5039 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
5040 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
5041 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
5042 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
5043
5044 if (ext4_inode_blocks_set(handle, raw_inode, ei))
5045 goto out_brelse;
5046 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
5047 raw_inode->i_flags = cpu_to_le32(ei->i_flags);
5048 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
5049 cpu_to_le32(EXT4_OS_HURD))
5050 raw_inode->i_file_acl_high =
5051 cpu_to_le16(ei->i_file_acl >> 32);
5052 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
5053 ext4_isize_set(raw_inode, ei->i_disksize);
5054 if (ei->i_disksize > 0x7fffffffULL) {
5055 struct super_block *sb = inode->i_sb;
5056 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
5057 EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
5058 EXT4_SB(sb)->s_es->s_rev_level ==
5059 cpu_to_le32(EXT4_GOOD_OLD_REV)) {
5060 /* If this is the first large file
5061 * created, add a flag to the superblock.
5062 */
5063 err = ext4_journal_get_write_access(handle,
5064 EXT4_SB(sb)->s_sbh);
5065 if (err)
5066 goto out_brelse;
5067 ext4_update_dynamic_rev(sb);
5068 EXT4_SET_RO_COMPAT_FEATURE(sb,
5069 EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
5070 sb->s_dirt = 1;
5071 ext4_handle_sync(handle);
5072 err = ext4_handle_dirty_metadata(handle, inode,
5073 EXT4_SB(sb)->s_sbh);
5074 }
5075 }
5076 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
5077 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
5078 if (old_valid_dev(inode->i_rdev)) {
5079 raw_inode->i_block[0] =
5080 cpu_to_le32(old_encode_dev(inode->i_rdev));
5081 raw_inode->i_block[1] = 0;
5082 } else {
5083 raw_inode->i_block[0] = 0;
5084 raw_inode->i_block[1] =
5085 cpu_to_le32(new_encode_dev(inode->i_rdev));
5086 raw_inode->i_block[2] = 0;
5087 }
5088 } else
5089 for (block = 0; block < EXT4_N_BLOCKS; block++)
5090 raw_inode->i_block[block] = ei->i_data[block];
5091
5092 raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
5093 if (ei->i_extra_isize) {
5094 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
5095 raw_inode->i_version_hi =
5096 cpu_to_le32(inode->i_version >> 32);
5097 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
5098 }
5099
5100 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
5101 rc = ext4_handle_dirty_metadata(handle, inode, bh);
5102 if (!err)
5103 err = rc;
5104 ei->i_state &= ~EXT4_STATE_NEW;
5105
5106 ext4_update_inode_fsync_trans(handle, inode, 0);
5107 out_brelse:
5108 brelse(bh);
5109 ext4_std_error(inode->i_sb, err);
5110 return err;
5111 }
5112
5113 /*
5114 * ext4_write_inode()
5115 *
5116 * We are called from a few places:
5117 *
5118 * - Within generic_file_write() for O_SYNC files.
5119 * Here, there will be no transaction running. We wait for any running
5120 * trasnaction to commit.
5121 *
5122 * - Within sys_sync(), kupdate and such.
5123 * We wait on commit, if tol to.
5124 *
5125 * - Within prune_icache() (PF_MEMALLOC == true)
5126 * Here we simply return. We can't afford to block kswapd on the
5127 * journal commit.
5128 *
5129 * In all cases it is actually safe for us to return without doing anything,
5130 * because the inode has been copied into a raw inode buffer in
5131 * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
5132 * knfsd.
5133 *
5134 * Note that we are absolutely dependent upon all inode dirtiers doing the
5135 * right thing: they *must* call mark_inode_dirty() after dirtying info in
5136 * which we are interested.
5137 *
5138 * It would be a bug for them to not do this. The code:
5139 *
5140 * mark_inode_dirty(inode)
5141 * stuff();
5142 * inode->i_size = expr;
5143 *
5144 * is in error because a kswapd-driven write_inode() could occur while
5145 * `stuff()' is running, and the new i_size will be lost. Plus the inode
5146 * will no longer be on the superblock's dirty inode list.
5147 */
5148 int ext4_write_inode(struct inode *inode, int wait)
5149 {
5150 int err;
5151
5152 if (current->flags & PF_MEMALLOC)
5153 return 0;
5154
5155 if (EXT4_SB(inode->i_sb)->s_journal) {
5156 if (ext4_journal_current_handle()) {
5157 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
5158 dump_stack();
5159 return -EIO;
5160 }
5161
5162 if (!wait)
5163 return 0;
5164
5165 err = ext4_force_commit(inode->i_sb);
5166 } else {
5167 struct ext4_iloc iloc;
5168
5169 err = ext4_get_inode_loc(inode, &iloc);
5170 if (err)
5171 return err;
5172 if (wait)
5173 sync_dirty_buffer(iloc.bh);
5174 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
5175 ext4_error(inode->i_sb, __func__,
5176 "IO error syncing inode, "
5177 "inode=%lu, block=%llu",
5178 inode->i_ino,
5179 (unsigned long long)iloc.bh->b_blocknr);
5180 err = -EIO;
5181 }
5182 }
5183 return err;
5184 }
5185
5186 /*
5187 * ext4_setattr()
5188 *
5189 * Called from notify_change.
5190 *
5191 * We want to trap VFS attempts to truncate the file as soon as
5192 * possible. In particular, we want to make sure that when the VFS
5193 * shrinks i_size, we put the inode on the orphan list and modify
5194 * i_disksize immediately, so that during the subsequent flushing of
5195 * dirty pages and freeing of disk blocks, we can guarantee that any
5196 * commit will leave the blocks being flushed in an unused state on
5197 * disk. (On recovery, the inode will get truncated and the blocks will
5198 * be freed, so we have a strong guarantee that no future commit will
5199 * leave these blocks visible to the user.)
5200 *
5201 * Another thing we have to assure is that if we are in ordered mode
5202 * and inode is still attached to the committing transaction, we must
5203 * we start writeout of all the dirty pages which are being truncated.
5204 * This way we are sure that all the data written in the previous
5205 * transaction are already on disk (truncate waits for pages under
5206 * writeback).
5207 *
5208 * Called with inode->i_mutex down.
5209 */
5210 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
5211 {
5212 struct inode *inode = dentry->d_inode;
5213 int error, rc = 0;
5214 const unsigned int ia_valid = attr->ia_valid;
5215
5216 error = inode_change_ok(inode, attr);
5217 if (error)
5218 return error;
5219
5220 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
5221 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
5222 handle_t *handle;
5223
5224 /* (user+group)*(old+new) structure, inode write (sb,
5225 * inode block, ? - but truncate inode update has it) */
5226 handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
5227 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
5228 if (IS_ERR(handle)) {
5229 error = PTR_ERR(handle);
5230 goto err_out;
5231 }
5232 error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
5233 if (error) {
5234 ext4_journal_stop(handle);
5235 return error;
5236 }
5237 /* Update corresponding info in inode so that everything is in
5238 * one transaction */
5239 if (attr->ia_valid & ATTR_UID)
5240 inode->i_uid = attr->ia_uid;
5241 if (attr->ia_valid & ATTR_GID)
5242 inode->i_gid = attr->ia_gid;
5243 error = ext4_mark_inode_dirty(handle, inode);
5244 ext4_journal_stop(handle);
5245 }
5246
5247 if (attr->ia_valid & ATTR_SIZE) {
5248 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
5249 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5250
5251 if (attr->ia_size > sbi->s_bitmap_maxbytes) {
5252 error = -EFBIG;
5253 goto err_out;
5254 }
5255 }
5256 }
5257
5258 if (S_ISREG(inode->i_mode) &&
5259 attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
5260 handle_t *handle;
5261
5262 handle = ext4_journal_start(inode, 3);
5263 if (IS_ERR(handle)) {
5264 error = PTR_ERR(handle);
5265 goto err_out;
5266 }
5267
5268 error = ext4_orphan_add(handle, inode);
5269 EXT4_I(inode)->i_disksize = attr->ia_size;
5270 rc = ext4_mark_inode_dirty(handle, inode);
5271 if (!error)
5272 error = rc;
5273 ext4_journal_stop(handle);
5274
5275 if (ext4_should_order_data(inode)) {
5276 error = ext4_begin_ordered_truncate(inode,
5277 attr->ia_size);
5278 if (error) {
5279 /* Do as much error cleanup as possible */
5280 handle = ext4_journal_start(inode, 3);
5281 if (IS_ERR(handle)) {
5282 ext4_orphan_del(NULL, inode);
5283 goto err_out;
5284 }
5285 ext4_orphan_del(handle, inode);
5286 ext4_journal_stop(handle);
5287 goto err_out;
5288 }
5289 }
5290 }
5291
5292 rc = inode_setattr(inode, attr);
5293
5294 /* If inode_setattr's call to ext4_truncate failed to get a
5295 * transaction handle at all, we need to clean up the in-core
5296 * orphan list manually. */
5297 if (inode->i_nlink)
5298 ext4_orphan_del(NULL, inode);
5299
5300 if (!rc && (ia_valid & ATTR_MODE))
5301 rc = ext4_acl_chmod(inode);
5302
5303 err_out:
5304 ext4_std_error(inode->i_sb, error);
5305 if (!error)
5306 error = rc;
5307 return error;
5308 }
5309
5310 int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
5311 struct kstat *stat)
5312 {
5313 struct inode *inode;
5314 unsigned long delalloc_blocks;
5315
5316 inode = dentry->d_inode;
5317 generic_fillattr(inode, stat);
5318
5319 /*
5320 * We can't update i_blocks if the block allocation is delayed
5321 * otherwise in the case of system crash before the real block
5322 * allocation is done, we will have i_blocks inconsistent with
5323 * on-disk file blocks.
5324 * We always keep i_blocks updated together with real
5325 * allocation. But to not confuse with user, stat
5326 * will return the blocks that include the delayed allocation
5327 * blocks for this file.
5328 */
5329 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
5330 delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
5331 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
5332
5333 stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
5334 return 0;
5335 }
5336
5337 static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
5338 int chunk)
5339 {
5340 int indirects;
5341
5342 /* if nrblocks are contiguous */
5343 if (chunk) {
5344 /*
5345 * With N contiguous data blocks, it need at most
5346 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks
5347 * 2 dindirect blocks
5348 * 1 tindirect block
5349 */
5350 indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
5351 return indirects + 3;
5352 }
5353 /*
5354 * if nrblocks are not contiguous, worse case, each block touch
5355 * a indirect block, and each indirect block touch a double indirect
5356 * block, plus a triple indirect block
5357 */
5358 indirects = nrblocks * 2 + 1;
5359 return indirects;
5360 }
5361
5362 static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
5363 {
5364 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
5365 return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
5366 return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
5367 }
5368
5369 /*
5370 * Account for index blocks, block groups bitmaps and block group
5371 * descriptor blocks if modify datablocks and index blocks
5372 * worse case, the indexs blocks spread over different block groups
5373 *
5374 * If datablocks are discontiguous, they are possible to spread over
5375 * different block groups too. If they are contiuguous, with flexbg,
5376 * they could still across block group boundary.
5377 *
5378 * Also account for superblock, inode, quota and xattr blocks
5379 */
5380 int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
5381 {
5382 ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
5383 int gdpblocks;
5384 int idxblocks;
5385 int ret = 0;
5386
5387 /*
5388 * How many index blocks need to touch to modify nrblocks?
5389 * The "Chunk" flag indicating whether the nrblocks is
5390 * physically contiguous on disk
5391 *
5392 * For Direct IO and fallocate, they calls get_block to allocate
5393 * one single extent at a time, so they could set the "Chunk" flag
5394 */
5395 idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
5396
5397 ret = idxblocks;
5398
5399 /*
5400 * Now let's see how many group bitmaps and group descriptors need
5401 * to account
5402 */
5403 groups = idxblocks;
5404 if (chunk)
5405 groups += 1;
5406 else
5407 groups += nrblocks;
5408
5409 gdpblocks = groups;
5410 if (groups > ngroups)
5411 groups = ngroups;
5412 if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
5413 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
5414
5415 /* bitmaps and block group descriptor blocks */
5416 ret += groups + gdpblocks;
5417
5418 /* Blocks for super block, inode, quota and xattr blocks */
5419 ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
5420
5421 return ret;
5422 }
5423
5424 /*
5425 * Calulate the total number of credits to reserve to fit
5426 * the modification of a single pages into a single transaction,
5427 * which may include multiple chunks of block allocations.
5428 *
5429 * This could be called via ext4_write_begin()
5430 *
5431 * We need to consider the worse case, when
5432 * one new block per extent.
5433 */
5434 int ext4_writepage_trans_blocks(struct inode *inode)
5435 {
5436 int bpp = ext4_journal_blocks_per_page(inode);
5437 int ret;
5438
5439 ret = ext4_meta_trans_blocks(inode, bpp, 0);
5440
5441 /* Account for data blocks for journalled mode */
5442 if (ext4_should_journal_data(inode))
5443 ret += bpp;
5444 return ret;
5445 }
5446
5447 /*
5448 * Calculate the journal credits for a chunk of data modification.
5449 *
5450 * This is called from DIO, fallocate or whoever calling
5451 * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks.
5452 *
5453 * journal buffers for data blocks are not included here, as DIO
5454 * and fallocate do no need to journal data buffers.
5455 */
5456 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5457 {
5458 return ext4_meta_trans_blocks(inode, nrblocks, 1);
5459 }
5460
5461 /*
5462 * The caller must have previously called ext4_reserve_inode_write().
5463 * Give this, we know that the caller already has write access to iloc->bh.
5464 */
5465 int ext4_mark_iloc_dirty(handle_t *handle,
5466 struct inode *inode, struct ext4_iloc *iloc)
5467 {
5468 int err = 0;
5469
5470 if (test_opt(inode->i_sb, I_VERSION))
5471 inode_inc_iversion(inode);
5472
5473 /* the do_update_inode consumes one bh->b_count */
5474 get_bh(iloc->bh);
5475
5476 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
5477 err = ext4_do_update_inode(handle, inode, iloc);
5478 put_bh(iloc->bh);
5479 return err;
5480 }
5481
5482 /*
5483 * On success, We end up with an outstanding reference count against
5484 * iloc->bh. This _must_ be cleaned up later.
5485 */
5486
5487 int
5488 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5489 struct ext4_iloc *iloc)
5490 {
5491 int err;
5492
5493 err = ext4_get_inode_loc(inode, iloc);
5494 if (!err) {
5495 BUFFER_TRACE(iloc->bh, "get_write_access");
5496 err = ext4_journal_get_write_access(handle, iloc->bh);
5497 if (err) {
5498 brelse(iloc->bh);
5499 iloc->bh = NULL;
5500 }
5501 }
5502 ext4_std_error(inode->i_sb, err);
5503 return err;
5504 }
5505
5506 /*
5507 * Expand an inode by new_extra_isize bytes.
5508 * Returns 0 on success or negative error number on failure.
5509 */
5510 static int ext4_expand_extra_isize(struct inode *inode,
5511 unsigned int new_extra_isize,
5512 struct ext4_iloc iloc,
5513 handle_t *handle)
5514 {
5515 struct ext4_inode *raw_inode;
5516 struct ext4_xattr_ibody_header *header;
5517 struct ext4_xattr_entry *entry;
5518
5519 if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
5520 return 0;
5521
5522 raw_inode = ext4_raw_inode(&iloc);
5523
5524 header = IHDR(inode, raw_inode);
5525 entry = IFIRST(header);
5526
5527 /* No extended attributes present */
5528 if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
5529 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5530 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
5531 new_extra_isize);
5532 EXT4_I(inode)->i_extra_isize = new_extra_isize;
5533 return 0;
5534 }
5535
5536 /* try to expand with EAs present */
5537 return ext4_expand_extra_isize_ea(inode, new_extra_isize,
5538 raw_inode, handle);
5539 }
5540
5541 /*
5542 * What we do here is to mark the in-core inode as clean with respect to inode
5543 * dirtiness (it may still be data-dirty).
5544 * This means that the in-core inode may be reaped by prune_icache
5545 * without having to perform any I/O. This is a very good thing,
5546 * because *any* task may call prune_icache - even ones which
5547 * have a transaction open against a different journal.
5548 *
5549 * Is this cheating? Not really. Sure, we haven't written the
5550 * inode out, but prune_icache isn't a user-visible syncing function.
5551 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
5552 * we start and wait on commits.
5553 *
5554 * Is this efficient/effective? Well, we're being nice to the system
5555 * by cleaning up our inodes proactively so they can be reaped
5556 * without I/O. But we are potentially leaving up to five seconds'
5557 * worth of inodes floating about which prune_icache wants us to
5558 * write out. One way to fix that would be to get prune_icache()
5559 * to do a write_super() to free up some memory. It has the desired
5560 * effect.
5561 */
5562 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
5563 {
5564 struct ext4_iloc iloc;
5565 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5566 static unsigned int mnt_count;
5567 int err, ret;
5568
5569 might_sleep();
5570 err = ext4_reserve_inode_write(handle, inode, &iloc);
5571 if (ext4_handle_valid(handle) &&
5572 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
5573 !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
5574 /*
5575 * We need extra buffer credits since we may write into EA block
5576 * with this same handle. If journal_extend fails, then it will
5577 * only result in a minor loss of functionality for that inode.
5578 * If this is felt to be critical, then e2fsck should be run to
5579 * force a large enough s_min_extra_isize.
5580 */
5581 if ((jbd2_journal_extend(handle,
5582 EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
5583 ret = ext4_expand_extra_isize(inode,
5584 sbi->s_want_extra_isize,
5585 iloc, handle);
5586 if (ret) {
5587 EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
5588 if (mnt_count !=
5589 le16_to_cpu(sbi->s_es->s_mnt_count)) {
5590 ext4_warning(inode->i_sb, __func__,
5591 "Unable to expand inode %lu. Delete"
5592 " some EAs or run e2fsck.",
5593 inode->i_ino);
5594 mnt_count =
5595 le16_to_cpu(sbi->s_es->s_mnt_count);
5596 }
5597 }
5598 }
5599 }
5600 if (!err)
5601 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
5602 return err;
5603 }
5604
5605 /*
5606 * ext4_dirty_inode() is called from __mark_inode_dirty()
5607 *
5608 * We're really interested in the case where a file is being extended.
5609 * i_size has been changed by generic_commit_write() and we thus need
5610 * to include the updated inode in the current transaction.
5611 *
5612 * Also, vfs_dq_alloc_block() will always dirty the inode when blocks
5613 * are allocated to the file.
5614 *
5615 * If the inode is marked synchronous, we don't honour that here - doing
5616 * so would cause a commit on atime updates, which we don't bother doing.
5617 * We handle synchronous inodes at the highest possible level.
5618 */
5619 void ext4_dirty_inode(struct inode *inode)
5620 {
5621 handle_t *handle;
5622
5623 handle = ext4_journal_start(inode, 2);
5624 if (IS_ERR(handle))
5625 goto out;
5626
5627 ext4_mark_inode_dirty(handle, inode);
5628
5629 ext4_journal_stop(handle);
5630 out:
5631 return;
5632 }
5633
5634 #if 0
5635 /*
5636 * Bind an inode's backing buffer_head into this transaction, to prevent
5637 * it from being flushed to disk early. Unlike
5638 * ext4_reserve_inode_write, this leaves behind no bh reference and
5639 * returns no iloc structure, so the caller needs to repeat the iloc
5640 * lookup to mark the inode dirty later.
5641 */
5642 static int ext4_pin_inode(handle_t *handle, struct inode *inode)
5643 {
5644 struct ext4_iloc iloc;
5645
5646 int err = 0;
5647 if (handle) {
5648 err = ext4_get_inode_loc(inode, &iloc);
5649 if (!err) {
5650 BUFFER_TRACE(iloc.bh, "get_write_access");
5651 err = jbd2_journal_get_write_access(handle, iloc.bh);
5652 if (!err)
5653 err = ext4_handle_dirty_metadata(handle,
5654 inode,
5655 iloc.bh);
5656 brelse(iloc.bh);
5657 }
5658 }
5659 ext4_std_error(inode->i_sb, err);
5660 return err;
5661 }
5662 #endif
5663
5664 int ext4_change_inode_journal_flag(struct inode *inode, int val)
5665 {
5666 journal_t *journal;
5667 handle_t *handle;
5668 int err;
5669
5670 /*
5671 * We have to be very careful here: changing a data block's
5672 * journaling status dynamically is dangerous. If we write a
5673 * data block to the journal, change the status and then delete
5674 * that block, we risk forgetting to revoke the old log record
5675 * from the journal and so a subsequent replay can corrupt data.
5676 * So, first we make sure that the journal is empty and that
5677 * nobody is changing anything.
5678 */
5679
5680 journal = EXT4_JOURNAL(inode);
5681 if (!journal)
5682 return 0;
5683 if (is_journal_aborted(journal))
5684 return -EROFS;
5685
5686 jbd2_journal_lock_updates(journal);
5687 jbd2_journal_flush(journal);
5688
5689 /*
5690 * OK, there are no updates running now, and all cached data is
5691 * synced to disk. We are now in a completely consistent state
5692 * which doesn't have anything in the journal, and we know that
5693 * no filesystem updates are running, so it is safe to modify
5694 * the inode's in-core data-journaling state flag now.
5695 */
5696
5697 if (val)
5698 EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
5699 else
5700 EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
5701 ext4_set_aops(inode);
5702
5703 jbd2_journal_unlock_updates(journal);
5704
5705 /* Finally we can mark the inode as dirty. */
5706
5707 handle = ext4_journal_start(inode, 1);
5708 if (IS_ERR(handle))
5709 return PTR_ERR(handle);
5710
5711 err = ext4_mark_inode_dirty(handle, inode);
5712 ext4_handle_sync(handle);
5713 ext4_journal_stop(handle);
5714 ext4_std_error(inode->i_sb, err);
5715
5716 return err;
5717 }
5718
5719 static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
5720 {
5721 return !buffer_mapped(bh);
5722 }
5723
5724 int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5725 {
5726 struct page *page = vmf->page;
5727 loff_t size;
5728 unsigned long len;
5729 int ret = -EINVAL;
5730 void *fsdata;
5731 struct file *file = vma->vm_file;
5732 struct inode *inode = file->f_path.dentry->d_inode;
5733 struct address_space *mapping = inode->i_mapping;
5734
5735 /*
5736 * Get i_alloc_sem to stop truncates messing with the inode. We cannot
5737 * get i_mutex because we are already holding mmap_sem.
5738 */
5739 down_read(&inode->i_alloc_sem);
5740 size = i_size_read(inode);
5741 if (page->mapping != mapping || size <= page_offset(page)
5742 || !PageUptodate(page)) {
5743 /* page got truncated from under us? */
5744 goto out_unlock;
5745 }
5746 ret = 0;
5747 if (PageMappedToDisk(page))
5748 goto out_unlock;
5749
5750 if (page->index == size >> PAGE_CACHE_SHIFT)
5751 len = size & ~PAGE_CACHE_MASK;
5752 else
5753 len = PAGE_CACHE_SIZE;
5754
5755 lock_page(page);
5756 /*
5757 * return if we have all the buffers mapped. This avoid
5758 * the need to call write_begin/write_end which does a
5759 * journal_start/journal_stop which can block and take
5760 * long time
5761 */
5762 if (page_has_buffers(page)) {
5763 if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
5764 ext4_bh_unmapped)) {
5765 unlock_page(page);
5766 goto out_unlock;
5767 }
5768 }
5769 unlock_page(page);
5770 /*
5771 * OK, we need to fill the hole... Do write_begin write_end
5772 * to do block allocation/reservation.We are not holding
5773 * inode.i__mutex here. That allow * parallel write_begin,
5774 * write_end call. lock_page prevent this from happening
5775 * on the same page though
5776 */
5777 ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
5778 len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
5779 if (ret < 0)
5780 goto out_unlock;
5781 ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
5782 len, len, page, fsdata);
5783 if (ret < 0)
5784 goto out_unlock;
5785 ret = 0;
5786 out_unlock:
5787 if (ret)
5788 ret = VM_FAULT_SIGBUS;
5789 up_read(&inode->i_alloc_sem);
5790 return ret;
5791 }
This page took 0.23454 seconds and 5 git commands to generate.