switch ext3 to inode->i_acl
1 /*
2 * linux/fs/ext3/inode.c
3 *
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
8 *
9 * from
10 *
11 * linux/fs/minix/inode.c
12 *
13 * Copyright (C) 1991, 1992 Linus Torvalds
14 *
15 * Goal-directed block allocation by Stephen Tweedie
16 * (sct@redhat.com), 1993, 1998
17 * Big-endian to little-endian byte-swapping/bitmaps by
18 * David S. Miller (davem@caip.rutgers.edu), 1995
19 * 64-bit file support on 64-bit platforms by Jakub Jelinek
20 * (jj@sunsite.ms.mff.cuni.cz)
21 *
22 * Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
23 */
24
25 #include <linux/module.h>
26 #include <linux/fs.h>
27 #include <linux/time.h>
28 #include <linux/ext3_jbd.h>
29 #include <linux/jbd.h>
30 #include <linux/highuid.h>
31 #include <linux/pagemap.h>
32 #include <linux/quotaops.h>
33 #include <linux/string.h>
34 #include <linux/buffer_head.h>
35 #include <linux/writeback.h>
36 #include <linux/mpage.h>
37 #include <linux/uio.h>
38 #include <linux/bio.h>
39 #include <linux/fiemap.h>
40 #include <linux/namei.h>
41 #include "xattr.h"
42 #include "acl.h"
43
44 static int ext3_writepage_trans_blocks(struct inode *inode);
45
46 /*
47 * Test whether an inode is a fast symlink.
48 */
49 static int ext3_inode_is_fast_symlink(struct inode *inode)
50 {
51 int ea_blocks = EXT3_I(inode)->i_file_acl ?
52 (inode->i_sb->s_blocksize >> 9) : 0;
53
54 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
55 }
56
57 /*
58 * The ext3 forget function must perform a revoke if we are freeing data
59 * which has been journaled. Metadata (eg. indirect blocks) must be
60 * revoked in all cases.
61 *
62 * "bh" may be NULL: a metadata block may have been freed from memory
63 * but there may still be a record of it in the journal, and that record
64 * still needs to be revoked.
65 */
66 int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
67 struct buffer_head *bh, ext3_fsblk_t blocknr)
68 {
69 int err;
70
71 might_sleep();
72
73 BUFFER_TRACE(bh, "enter");
74
75 jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
76 "data mode %lx\n",
77 bh, is_metadata, inode->i_mode,
78 test_opt(inode->i_sb, DATA_FLAGS));
79
80 /* Never use the revoke function if we are doing full data
81 * journaling: there is no need to, and a V1 superblock won't
82 * support it. Otherwise, only skip the revoke on un-journaled
83 * data blocks. */
84
85 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
86 (!is_metadata && !ext3_should_journal_data(inode))) {
87 if (bh) {
88 BUFFER_TRACE(bh, "call journal_forget");
89 return ext3_journal_forget(handle, bh);
90 }
91 return 0;
92 }
93
94 /*
95 * data!=journal && (is_metadata || should_journal_data(inode))
96 */
97 BUFFER_TRACE(bh, "call ext3_journal_revoke");
98 err = ext3_journal_revoke(handle, blocknr, bh);
99 if (err)
100 ext3_abort(inode->i_sb, __func__,
101 "error %d when attempting revoke", err);
102 BUFFER_TRACE(bh, "exit");
103 return err;
104 }
105
106 /*
107 * Work out how many blocks we need to proceed with the next chunk of a
108 * truncate transaction.
109 */
110 static unsigned long blocks_for_truncate(struct inode *inode)
111 {
112 unsigned long needed;
113
114 needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
115
116 /* Give ourselves just enough room to cope with inodes in which
117 * i_blocks is corrupt: we've seen disk corruptions in the past
118 * which resulted in random data in an inode which looked enough
119 * like a regular file for ext3 to try to delete it. Things
120 * will go a bit crazy if that happens, but at least we should
121 * try not to panic the whole kernel. */
122 if (needed < 2)
123 needed = 2;
124
125 /* But we need to bound the transaction so we don't overflow the
126 * journal. */
127 if (needed > EXT3_MAX_TRANS_DATA)
128 needed = EXT3_MAX_TRANS_DATA;
129
130 return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
131 }
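/*
 * A worked example (hypothetical numbers, not from the source): with a
 * 4KB filesystem, s_blocksize_bits is 12, so i_blocks (counted in
 * 512-byte sectors) is shifted right by 3. An inode with i_blocks == 240
 * therefore yields needed == 30 - above the floor of 2 and, assuming
 * EXT3_MAX_TRANS_DATA is larger, below the cap - and the handle is
 * started with EXT3_DATA_TRANS_BLOCKS(sb) + 30 credits.
 */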
132
133 /*
134 * Truncate transactions can be complex and absolutely huge. So we need to
135 * be able to restart the transaction at a convenient checkpoint to make
136 * sure we don't overflow the journal.
137 *
138 * start_transaction gets us a new handle for a truncate transaction,
139 * and extend_transaction tries to extend the existing one a bit. If
140 * extend fails, we need to propagate the failure up and restart the
141 * transaction in the top-level truncate loop. --sct
142 */
143 static handle_t *start_transaction(struct inode *inode)
144 {
145 handle_t *result;
146
147 result = ext3_journal_start(inode, blocks_for_truncate(inode));
148 if (!IS_ERR(result))
149 return result;
150
151 ext3_std_error(inode->i_sb, PTR_ERR(result));
152 return result;
153 }
154
155 /*
156 * Try to extend this transaction for the purposes of truncation.
157 *
158 * Returns 0 if we managed to create more room. If we can't create more
159 * room, the transaction must be restarted, and we return 1.
160 */
161 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
162 {
163 if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
164 return 0;
165 if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
166 return 0;
167 return 1;
168 }
169
170 /*
171 * Restart the transaction associated with *handle. This does a commit,
172 * so before we call here everything must be consistently dirtied against
173 * this transaction.
174 */
175 static int ext3_journal_test_restart(handle_t *handle, struct inode *inode)
176 {
177 jbd_debug(2, "restarting handle %p\n", handle);
178 return ext3_journal_restart(handle, blocks_for_truncate(inode));
179 }
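/*
 * A hedged sketch of how the two helpers above are usually combined by
 * the truncate path (the exact call sites live elsewhere in this file);
 * note the inode is dirtied first so everything is consistent before the
 * restart commits:
 *
 *	if (try_to_extend_transaction(handle, inode)) {
 *		ext3_mark_inode_dirty(handle, inode);
 *		ext3_journal_test_restart(handle, inode);
 *	}
 */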
180
181 /*
182 * Called at the last iput() if i_nlink is zero.
183 */
184 void ext3_delete_inode (struct inode * inode)
185 {
186 handle_t *handle;
187
188 truncate_inode_pages(&inode->i_data, 0);
189
190 if (is_bad_inode(inode))
191 goto no_delete;
192
193 handle = start_transaction(inode);
194 if (IS_ERR(handle)) {
195 /*
196 * If we're going to skip the normal cleanup, we still need to
197 * make sure that the in-core orphan linked list is properly
198 * cleaned up.
199 */
200 ext3_orphan_del(NULL, inode);
201 goto no_delete;
202 }
203
204 if (IS_SYNC(inode))
205 handle->h_sync = 1;
206 inode->i_size = 0;
207 if (inode->i_blocks)
208 ext3_truncate(inode);
209 /*
210 * Kill off the orphan record which ext3_truncate created.
211 * AKPM: I think this can be inside the above `if'.
212 * Note that ext3_orphan_del() has to be able to cope with the
213 * deletion of a non-existent orphan - this is because we don't
214 * know if ext3_truncate() actually created an orphan record.
215 * (Well, we could do this if we need to, but heck - it works)
216 */
217 ext3_orphan_del(handle, inode);
218 EXT3_I(inode)->i_dtime = get_seconds();
219
220 /*
221 * One subtle ordering requirement: if anything has gone wrong
222 * (transaction abort, IO errors, whatever), then we can still
223 * do these next steps (the fs will already have been marked as
224 * having errors), but we can't free the inode if the mark_dirty
225 * fails.
226 */
227 if (ext3_mark_inode_dirty(handle, inode))
228 /* If that failed, just do the required in-core inode clear. */
229 clear_inode(inode);
230 else
231 ext3_free_inode(handle, inode);
232 ext3_journal_stop(handle);
233 return;
234 no_delete:
235 clear_inode(inode); /* We must guarantee clearing of inode... */
236 }
237
238 typedef struct {
239 __le32 *p;
240 __le32 key;
241 struct buffer_head *bh;
242 } Indirect;
243
244 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
245 {
246 p->key = *(p->p = v);
247 p->bh = bh;
248 }
249
250 static int verify_chain(Indirect *from, Indirect *to)
251 {
252 while (from <= to && from->key == *from->p)
253 from++;
254 return (from > to);
255 }
256
257 /**
258 * ext3_block_to_path - parse the block number into array of offsets
259 * @inode: inode in question (we are only interested in its superblock)
260 * @i_block: block number to be parsed
261 * @offsets: array to store the offsets in
262 * @boundary: set this non-zero if the referred-to block is likely to be
263 * followed (on disk) by an indirect block.
264 *
265 * To store the locations of a file's data ext3 uses a data structure common
266 * for UNIX filesystems - tree of pointers anchored in the inode, with
267 * data blocks at leaves and indirect blocks in intermediate nodes.
268 * This function translates the block number into a path in that tree -
269 * the return value is the path length and @offsets[n] is the offset of
270 * the pointer to the (n+1)th node in the nth one. If @block is out of range
271 * (negative or too large), a warning is printed and zero is returned.
272 *
273 * Note: function doesn't find node addresses, so no IO is needed. All
274 * we need to know is the capacity of indirect blocks (taken from the
275 * inode->i_sb).
276 */
277
278 /*
279 * Portability note: the last comparison (check that we fit into triple
280 * indirect block) is spelled differently, because otherwise on an
281 * architecture with 32-bit longs and 8Kb pages we might get into trouble
282 * if our filesystem had 8Kb blocks. We might use long long, but that would
283 * kill us on x86. Oh, well, at least the sign propagation does not matter -
284 * i_block would have to be negative in the very beginning, so we would not
285 * get there at all.
286 */
287
288 static int ext3_block_to_path(struct inode *inode,
289 long i_block, int offsets[4], int *boundary)
290 {
291 int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
292 int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
293 const long direct_blocks = EXT3_NDIR_BLOCKS,
294 indirect_blocks = ptrs,
295 double_blocks = (1 << (ptrs_bits * 2));
296 int n = 0;
297 int final = 0;
298
299 if (i_block < 0) {
300 ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
301 } else if (i_block < direct_blocks) {
302 offsets[n++] = i_block;
303 final = direct_blocks;
304 } else if ( (i_block -= direct_blocks) < indirect_blocks) {
305 offsets[n++] = EXT3_IND_BLOCK;
306 offsets[n++] = i_block;
307 final = ptrs;
308 } else if ((i_block -= indirect_blocks) < double_blocks) {
309 offsets[n++] = EXT3_DIND_BLOCK;
310 offsets[n++] = i_block >> ptrs_bits;
311 offsets[n++] = i_block & (ptrs - 1);
312 final = ptrs;
313 } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
314 offsets[n++] = EXT3_TIND_BLOCK;
315 offsets[n++] = i_block >> (ptrs_bits * 2);
316 offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
317 offsets[n++] = i_block & (ptrs - 1);
318 final = ptrs;
319 } else {
320 ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
321 }
322 if (boundary)
323 *boundary = final - 1 - (i_block & (ptrs - 1));
324 return n;
325 }
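/*
 * A worked trace (hypothetical numbers): assume 1KB blocks, so
 * ptrs == 256 and ptrs_bits == 8. For i_block == 300:
 * 300 >= 12 direct blocks, so i_block becomes 288; 288 >= 256
 * singly-indirect slots, so i_block becomes 32, which fits in the
 * doubly-indirect range. The resulting path is
 * offsets[] = { EXT3_DIND_BLOCK, 32 >> 8 == 0, 32 & 255 == 32 },
 * depth 3, and *boundary == 256 - 1 - 32 == 223.
 */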
326
327 /**
328 * ext3_get_branch - read the chain of indirect blocks leading to data
329 * @inode: inode in question
330 * @depth: depth of the chain (1 - direct pointer, etc.)
331 * @offsets: offsets of pointers in inode/indirect blocks
332 * @chain: place to store the result
333 * @err: here we store the error value
334 *
335 * Function fills the array of triples <key, p, bh> and returns %NULL
336 * if everything went OK or the pointer to the last filled triple
337 * (incomplete one) otherwise. Upon the return chain[i].key contains
338 * the number of (i+1)-th block in the chain (as it is stored in memory,
339 * i.e. little-endian 32-bit), chain[i].p contains the address of that
340 * number (it points into struct inode for i==0 and into the bh->b_data
341 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
342 * block for i>0 and NULL for i==0. In other words, it holds the block
343 * numbers of the chain, addresses they were taken from (and where we can
344 * verify that chain did not change) and buffer_heads hosting these
345 * numbers.
346 *
347 * Function stops when it stumbles upon zero pointer (absent block)
348 * (pointer to last triple returned, *@err == 0)
349 * or when it gets an IO error reading an indirect block
350 * (ditto, *@err == -EIO)
351 * or when it notices that chain had been changed while it was reading
352 * (ditto, *@err == -EAGAIN)
353 * or when it reads all @depth-1 indirect blocks successfully and finds
354 * the whole chain, all way to the data (returns %NULL, *err == 0).
355 */
356 static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
357 Indirect chain[4], int *err)
358 {
359 struct super_block *sb = inode->i_sb;
360 Indirect *p = chain;
361 struct buffer_head *bh;
362
363 *err = 0;
364 /* i_data is not going away, no lock needed */
365 add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
366 if (!p->key)
367 goto no_block;
368 while (--depth) {
369 bh = sb_bread(sb, le32_to_cpu(p->key));
370 if (!bh)
371 goto failure;
372 /* Reader: pointers */
373 if (!verify_chain(chain, p))
374 goto changed;
375 add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
376 /* Reader: end */
377 if (!p->key)
378 goto no_block;
379 }
380 return NULL;
381
382 changed:
383 brelse(bh);
384 *err = -EAGAIN;
385 goto no_block;
386 failure:
387 *err = -EIO;
388 no_block:
389 return p;
390 }
391
392 /**
393 * ext3_find_near - find a place for allocation with sufficient locality
394 * @inode: owner
395 * @ind: descriptor of indirect block.
396 *
397 * This function returns the preferred place for block allocation.
398 * It is used when the heuristic for sequential allocation fails.
399 * Rules are:
400 * + if there is a block to the left of our position - allocate near it.
401 * + if pointer will live in indirect block - allocate near that block.
402 * + if pointer will live in inode - allocate in the same
403 * cylinder group.
404 *
405 * In the latter case we colour the starting block by the caller's PID to
406 * prevent it from clashing with concurrent allocations for a different inode
407 * in the same block group. The PID is used here so that functionally related
408 * files will be close-by on-disk.
409 *
410 * Caller must make sure that @ind is valid and will stay that way.
411 */
412 static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
413 {
414 struct ext3_inode_info *ei = EXT3_I(inode);
415 __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
416 __le32 *p;
417 ext3_fsblk_t bg_start;
418 ext3_grpblk_t colour;
419
420 /* Try to find previous block */
421 for (p = ind->p - 1; p >= start; p--) {
422 if (*p)
423 return le32_to_cpu(*p);
424 }
425
426 /* No such thing, so let's try location of indirect block */
427 if (ind->bh)
428 return ind->bh->b_blocknr;
429
430 /*
431 * It is going to be referred to from the inode itself? OK, just put it
432 * into the same cylinder group then.
433 */
434 bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
435 colour = (current->pid % 16) *
436 (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
437 return bg_start + colour;
438 }
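/*
 * Colour example (hypothetical numbers): with 32768 blocks per group the
 * group is carved into 16 slices of 2048 blocks; a caller with PID 1234
 * (1234 % 16 == 2) gets colour 2 * 2048 == 4096, i.e. an allocation goal
 * 4096 blocks into its cylinder group.
 */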
439
440 /**
441 * ext3_find_goal - find a preferred place for allocation.
442 * @inode: owner
443 * @block: block we want
444 * @partial: pointer to the last triple within a chain
445 *
446 * Normally this function finds the preferred place for block allocation
447 * and returns it.
448 */
449
450 static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
451 Indirect *partial)
452 {
453 struct ext3_block_alloc_info *block_i;
454
455 block_i = EXT3_I(inode)->i_block_alloc_info;
456
457 /*
458 * try the heuristic for sequential allocation,
459 * failing that at least try to get decent locality.
460 */
461 if (block_i && (block == block_i->last_alloc_logical_block + 1)
462 && (block_i->last_alloc_physical_block != 0)) {
463 return block_i->last_alloc_physical_block + 1;
464 }
465
466 return ext3_find_near(inode, partial);
467 }
468
469 /**
470 * ext3_blks_to_allocate: Look up the block map and count the number
471 * of direct blocks that need to be allocated for the given branch.
472 *
473 * @branch: chain of indirect blocks
474 * @k: number of blocks needed for indirect blocks
475 * @blks: number of data blocks to be mapped.
476 * @blocks_to_boundary: the offset in the indirect block
477 *
478 * return the total number of blocks to be allocated, including the
479 * direct and indirect blocks.
480 */
481 static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
482 int blocks_to_boundary)
483 {
484 unsigned long count = 0;
485
486 /*
487 * Simple case: the [t,d]indirect block(s) have not been allocated yet,
488 * so clearly the blocks on that path have not been allocated either
489 */
490 if (k > 0) {
491 /* right now we don't handle cross boundary allocation */
492 if (blks < blocks_to_boundary + 1)
493 count += blks;
494 else
495 count += blocks_to_boundary + 1;
496 return count;
497 }
498
499 count++;
500 while (count < blks && count <= blocks_to_boundary &&
501 le32_to_cpu(*(branch[0].p + count)) == 0) {
502 count++;
503 }
504 return count;
505 }
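/*
 * Example (hypothetical numbers): if an indirect block is still missing
 * (k > 0), a request for blks == 5 with blocks_to_boundary == 2 is
 * clipped to blocks_to_boundary + 1 == 3, since allocations never cross
 * an indirect-block boundary here.
 */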
506
507 /**
508 * ext3_alloc_blocks: multiple allocate blocks needed for a branch
509 * @indirect_blks: the number of blocks needed to allocate for indirect
510 * blocks
511 *
512 * @new_blocks: on return it will store the new block numbers for
513 * the indirect blocks(if needed) and the first direct block,
514 * @blks: on return it will store the total number of allocated
515 * direct blocks
516 */
517 static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
518 ext3_fsblk_t goal, int indirect_blks, int blks,
519 ext3_fsblk_t new_blocks[4], int *err)
520 {
521 int target, i;
522 unsigned long count = 0;
523 int index = 0;
524 ext3_fsblk_t current_block = 0;
525 int ret = 0;
526
527 /*
528 * Here we try to allocate the requested multiple blocks at once,
529 * on a best-effort basis.
530 * To build a branch, we need to allocate blocks for
531 * the indirect blocks (if not allocated yet), and at least
532 * the first direct block of this branch. That's the
533 * minimum number of blocks we must allocate (required).
534 */
535 target = blks + indirect_blks;
536
537 while (1) {
538 count = target;
539 /* allocating blocks for indirect blocks and direct blocks */
540 current_block = ext3_new_blocks(handle,inode,goal,&count,err);
541 if (*err)
542 goto failed_out;
543
544 target -= count;
545 /* allocate blocks for indirect blocks */
546 while (index < indirect_blks && count) {
547 new_blocks[index++] = current_block++;
548 count--;
549 }
550
551 if (count > 0)
552 break;
553 }
554
555 /* save the new block number for the first direct block */
556 new_blocks[index] = current_block;
557
558 /* total number of blocks allocated for direct blocks */
559 ret = count;
560 *err = 0;
561 return ret;
562 failed_out:
563 for (i = 0; i <index; i++)
564 ext3_free_blocks(handle, inode, new_blocks[i], 1);
565 return ret;
566 }
567
568 /**
569 * ext3_alloc_branch - allocate and set up a chain of blocks.
570 * @inode: owner
571 * @indirect_blks: number of allocated indirect blocks
572 * @blks: number of allocated direct blocks
573 * @offsets: offsets (in the blocks) to store the pointers to next.
574 * @branch: place to store the chain in.
575 *
576 * This function allocates blocks, zeroes out all but the last one,
577 * links them into chain and (if we are synchronous) writes them to disk.
578 * In other words, it prepares a branch that can be spliced onto the
579 * inode. It stores the information about that chain in the branch[], in
580 * the same format as ext3_get_branch() would do. We are calling it after
581 * we had read the existing part of chain and partial points to the last
582 * triple of that (one with zero ->key). Upon the exit we have the same
583 * picture as after the successful ext3_get_block(), except that in one
584 * place chain is disconnected - *branch->p is still zero (we did not
585 * set the last link), but branch->key contains the number that should
586 * be placed into *branch->p to fill that gap.
587 *
588 * If allocation fails we free all blocks we've allocated (and forget
589 * their buffer_heads) and return the error value from the failed
590 * ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
591 * as described above and return 0.
592 */
593 static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
594 int indirect_blks, int *blks, ext3_fsblk_t goal,
595 int *offsets, Indirect *branch)
596 {
597 int blocksize = inode->i_sb->s_blocksize;
598 int i, n = 0;
599 int err = 0;
600 struct buffer_head *bh;
601 int num;
602 ext3_fsblk_t new_blocks[4];
603 ext3_fsblk_t current_block;
604
605 num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
606 *blks, new_blocks, &err);
607 if (err)
608 return err;
609
610 branch[0].key = cpu_to_le32(new_blocks[0]);
611 /*
612 * metadata blocks and data blocks are allocated.
613 */
614 for (n = 1; n <= indirect_blks; n++) {
615 /*
616 * Get buffer_head for parent block, zero it out
617 * and set the pointer to new one, then send
618 * parent to disk.
619 */
620 bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
621 branch[n].bh = bh;
622 lock_buffer(bh);
623 BUFFER_TRACE(bh, "call get_create_access");
624 err = ext3_journal_get_create_access(handle, bh);
625 if (err) {
626 unlock_buffer(bh);
627 brelse(bh);
628 goto failed;
629 }
630
631 memset(bh->b_data, 0, blocksize);
632 branch[n].p = (__le32 *) bh->b_data + offsets[n];
633 branch[n].key = cpu_to_le32(new_blocks[n]);
634 *branch[n].p = branch[n].key;
635 if ( n == indirect_blks) {
636 current_block = new_blocks[n];
637 /*
638 * End of chain, update the last new metablock of
639 * the chain to point to the new allocated
640 * data blocks numbers
641 */
642 for (i=1; i < num; i++)
643 *(branch[n].p + i) = cpu_to_le32(++current_block);
644 }
645 BUFFER_TRACE(bh, "marking uptodate");
646 set_buffer_uptodate(bh);
647 unlock_buffer(bh);
648
649 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
650 err = ext3_journal_dirty_metadata(handle, bh);
651 if (err)
652 goto failed;
653 }
654 *blks = num;
655 return err;
656 failed:
657 /* Allocation failed, free what we already allocated */
658 for (i = 1; i <= n ; i++) {
659 BUFFER_TRACE(branch[i].bh, "call journal_forget");
660 ext3_journal_forget(handle, branch[i].bh);
661 }
662 for (i = 0; i <indirect_blks; i++)
663 ext3_free_blocks(handle, inode, new_blocks[i], 1);
664
665 ext3_free_blocks(handle, inode, new_blocks[i], num);
666
667 return err;
668 }
669
670 /**
671 * ext3_splice_branch - splice the allocated branch onto inode.
672 * @inode: owner
673 * @block: (logical) number of block we are adding
674 * @chain: chain of indirect blocks (with a missing link - see
675 * ext3_alloc_branch)
676 * @where: location of missing link
677 * @num: number of indirect blocks we are adding
678 * @blks: number of direct blocks we are adding
679 *
680 * This function fills the missing link and does all housekeeping needed in
681 * inode (->i_blocks, etc.). In case of success we end up with the full
682 * chain to new block and return 0.
683 */
684 static int ext3_splice_branch(handle_t *handle, struct inode *inode,
685 long block, Indirect *where, int num, int blks)
686 {
687 int i;
688 int err = 0;
689 struct ext3_block_alloc_info *block_i;
690 ext3_fsblk_t current_block;
691
692 block_i = EXT3_I(inode)->i_block_alloc_info;
693 /*
694 * If we're splicing into a [td]indirect block (as opposed to the
695 * inode) then we need to get write access to the [td]indirect block
696 * before the splice.
697 */
698 if (where->bh) {
699 BUFFER_TRACE(where->bh, "get_write_access");
700 err = ext3_journal_get_write_access(handle, where->bh);
701 if (err)
702 goto err_out;
703 }
704 /* That's it */
705
706 *where->p = where->key;
707
708 /*
709 * Update the host buffer_head or inode to point to the just-allocated
710 * direct blocks
711 */
712 if (num == 0 && blks > 1) {
713 current_block = le32_to_cpu(where->key) + 1;
714 for (i = 1; i < blks; i++)
715 *(where->p + i ) = cpu_to_le32(current_block++);
716 }
717
718 /*
719 * update the most recently allocated logical & physical block
720 * in i_block_alloc_info, to help find the proper goal block for the next
721 * allocation
722 */
723 if (block_i) {
724 block_i->last_alloc_logical_block = block + blks - 1;
725 block_i->last_alloc_physical_block =
726 le32_to_cpu(where[num].key) + blks - 1;
727 }
728
729 /* We are done with atomic stuff, now do the rest of housekeeping */
730
731 inode->i_ctime = CURRENT_TIME_SEC;
732 ext3_mark_inode_dirty(handle, inode);
733
734 /* had we spliced it onto indirect block? */
735 if (where->bh) {
736 /*
737 * If we spliced it onto an indirect block, we haven't
738 * altered the inode. Note however that if it is being spliced
739 * onto an indirect block at the very end of the file (the
740 * file is growing) then we *will* alter the inode to reflect
741 * the new i_size. But that is not done here - it is done in
742 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
743 */
744 jbd_debug(5, "splicing indirect only\n");
745 BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
746 err = ext3_journal_dirty_metadata(handle, where->bh);
747 if (err)
748 goto err_out;
749 } else {
750 /*
751 * OK, we spliced it into the inode itself on a direct block.
752 * Inode was dirtied above.
753 */
754 jbd_debug(5, "splicing direct\n");
755 }
756 return err;
757
758 err_out:
759 for (i = 1; i <= num; i++) {
760 BUFFER_TRACE(where[i].bh, "call journal_forget");
761 ext3_journal_forget(handle, where[i].bh);
762 ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
763 }
764 ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
765
766 return err;
767 }
768
769 /*
770 * Allocation strategy is simple: if we have to allocate something, we will
771 * have to go the whole way to leaf. So let's do it before attaching anything
772 * to tree, set linkage between the newborn blocks, write them if sync is
773 * required, recheck the path, free and repeat if check fails, otherwise
774 * set the last missing link (that will protect us from any truncate-generated
775 * removals - all blocks on the path are immune now) and possibly force the
776 * write on the parent block.
777 * That has a nice additional property: no special recovery from the failed
778 * allocations is needed - we simply release blocks and do not touch anything
779 * reachable from inode.
780 *
781 * `handle' can be NULL if create == 0.
782 *
783 * The BKL may not be held on entry here. Be sure to take it early.
784 * return > 0, # of blocks mapped or allocated.
785 * return = 0, if plain lookup failed.
786 * return < 0, error case.
787 */
788 int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
789 sector_t iblock, unsigned long maxblocks,
790 struct buffer_head *bh_result,
791 int create, int extend_disksize)
792 {
793 int err = -EIO;
794 int offsets[4];
795 Indirect chain[4];
796 Indirect *partial;
797 ext3_fsblk_t goal;
798 int indirect_blks;
799 int blocks_to_boundary = 0;
800 int depth;
801 struct ext3_inode_info *ei = EXT3_I(inode);
802 int count = 0;
803 ext3_fsblk_t first_block = 0;
804
805
806 J_ASSERT(handle != NULL || create == 0);
807 depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
808
809 if (depth == 0)
810 goto out;
811
812 partial = ext3_get_branch(inode, depth, offsets, chain, &err);
813
814 /* Simplest case - block found, no allocation needed */
815 if (!partial) {
816 first_block = le32_to_cpu(chain[depth - 1].key);
817 clear_buffer_new(bh_result);
818 count++;
819 /* map more blocks */
820 while (count < maxblocks && count <= blocks_to_boundary) {
821 ext3_fsblk_t blk;
822
823 if (!verify_chain(chain, chain + depth - 1)) {
824 /*
825 * Indirect block might be removed by
826 * truncate while we were reading it.
827 * Handling of that case: forget what we've
828 * got now. Flag the err as EAGAIN, so it
829 * will reread.
830 */
831 err = -EAGAIN;
832 count = 0;
833 break;
834 }
835 blk = le32_to_cpu(*(chain[depth-1].p + count));
836
837 if (blk == first_block + count)
838 count++;
839 else
840 break;
841 }
842 if (err != -EAGAIN)
843 goto got_it;
844 }
845
846 /* Next simple case - plain lookup or failed read of indirect block */
847 if (!create || err == -EIO)
848 goto cleanup;
849
850 mutex_lock(&ei->truncate_mutex);
851
852 /*
853 * If the indirect block is missing while we are reading
854 * the chain (ext3_get_branch() returns -EAGAIN), or if the chain
855 * has been changed after we grabbed the semaphore (either because
856 * another process truncated this branch, or another get_block
857 * allocated this branch), re-grab the chain to see if
858 * the requested block has been allocated or not.
859 *
860 * Since we already block the truncate/other get_block
861 * at this point, we will have the current copy of the chain when we
862 * splice the branch into the tree.
863 */
864 if (err == -EAGAIN || !verify_chain(chain, partial)) {
865 while (partial > chain) {
866 brelse(partial->bh);
867 partial--;
868 }
869 partial = ext3_get_branch(inode, depth, offsets, chain, &err);
870 if (!partial) {
871 count++;
872 mutex_unlock(&ei->truncate_mutex);
873 if (err)
874 goto cleanup;
875 clear_buffer_new(bh_result);
876 goto got_it;
877 }
878 }
879
880 /*
881 * Okay, we need to do block allocation. Lazily initialize the block
882 * allocation info here if necessary
883 */
884 if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
885 ext3_init_block_alloc_info(inode);
886
887 goal = ext3_find_goal(inode, iblock, partial);
888
889 /* the number of blocks need to allocate for [d,t]indirect blocks */
890 indirect_blks = (chain + depth) - partial - 1;
891
892 /*
893 * Next look up the indirect map to count the total number of
894 * direct blocks to allocate for this branch.
895 */
896 count = ext3_blks_to_allocate(partial, indirect_blks,
897 maxblocks, blocks_to_boundary);
898 /*
899 * Block out ext3_truncate while we alter the tree
900 */
901 err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
902 offsets + (partial - chain), partial);
903
904 /*
905 * The ext3_splice_branch call will free and forget any buffers
906 * on the new chain if there is a failure, but that risks using
907 * up transaction credits, especially for bitmaps where the
908 * credits cannot be returned. Can we handle this somehow? We
909 * may need to return -EAGAIN upwards in the worst case. --sct
910 */
911 if (!err)
912 err = ext3_splice_branch(handle, inode, iblock,
913 partial, indirect_blks, count);
914 /*
915 * i_disksize growing is protected by truncate_mutex. Don't forget to
916 * protect it if you're about to implement concurrent
917 * ext3_get_block() -bzzz
918 */
919 if (!err && extend_disksize && inode->i_size > ei->i_disksize)
920 ei->i_disksize = inode->i_size;
921 mutex_unlock(&ei->truncate_mutex);
922 if (err)
923 goto cleanup;
924
925 set_buffer_new(bh_result);
926 got_it:
927 map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
928 if (count > blocks_to_boundary)
929 set_buffer_boundary(bh_result);
930 err = count;
931 /* Clean up and exit */
932 partial = chain + depth - 1; /* the whole chain */
933 cleanup:
934 while (partial > chain) {
935 BUFFER_TRACE(partial->bh, "call brelse");
936 brelse(partial->bh);
937 partial--;
938 }
939 BUFFER_TRACE(bh_result, "returned");
940 out:
941 return err;
942 }
943
944 /* Maximum number of blocks we map for direct IO at once. */
945 #define DIO_MAX_BLOCKS 4096
946 /*
947 * Number of credits we need for writing DIO_MAX_BLOCKS:
948 * We need sb + group descriptor + bitmap + inode -> 4
949 * For B blocks with A block pointers per block we need:
950 * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
951 * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
952 */
953 #define DIO_CREDITS 25
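/*
 * Checking the arithmetic above (B == 4096, A == 256):
 * 4 (sb + group descriptor + bitmap + inode)
 * + 1 (triple indirect)
 * + 4096/256/256 + 2 == 2 (doubly indirect)
 * + 4096/256 + 2 == 18 (indirect)
 * == 25.
 */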
954
955 static int ext3_get_block(struct inode *inode, sector_t iblock,
956 struct buffer_head *bh_result, int create)
957 {
958 handle_t *handle = ext3_journal_current_handle();
959 int ret = 0, started = 0;
960 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
961
962 if (create && !handle) { /* Direct IO write... */
963 if (max_blocks > DIO_MAX_BLOCKS)
964 max_blocks = DIO_MAX_BLOCKS;
965 handle = ext3_journal_start(inode, DIO_CREDITS +
966 2 * EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb));
967 if (IS_ERR(handle)) {
968 ret = PTR_ERR(handle);
969 goto out;
970 }
971 started = 1;
972 }
973
974 ret = ext3_get_blocks_handle(handle, inode, iblock,
975 max_blocks, bh_result, create, 0);
976 if (ret > 0) {
977 bh_result->b_size = (ret << inode->i_blkbits);
978 ret = 0;
979 }
980 if (started)
981 ext3_journal_stop(handle);
982 out:
983 return ret;
984 }
985
986 int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
987 u64 start, u64 len)
988 {
989 return generic_block_fiemap(inode, fieinfo, start, len,
990 ext3_get_block);
991 }
992
993 /*
994 * `handle' can be NULL if create is zero
995 */
996 struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
997 long block, int create, int *errp)
998 {
999 struct buffer_head dummy;
1000 int fatal = 0, err;
1001
1002 J_ASSERT(handle != NULL || create == 0);
1003
1004 dummy.b_state = 0;
1005 dummy.b_blocknr = -1000;
1006 buffer_trace_init(&dummy.b_history);
1007 err = ext3_get_blocks_handle(handle, inode, block, 1,
1008 &dummy, create, 1);
1009 /*
1010 * ext3_get_blocks_handle() returns number of blocks
1011 * mapped. 0 in case of a HOLE.
1012 */
1013 if (err > 0) {
1014 if (err > 1)
1015 WARN_ON(1);
1016 err = 0;
1017 }
1018 *errp = err;
1019 if (!err && buffer_mapped(&dummy)) {
1020 struct buffer_head *bh;
1021 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1022 if (!bh) {
1023 *errp = -EIO;
1024 goto err;
1025 }
1026 if (buffer_new(&dummy)) {
1027 J_ASSERT(create != 0);
1028 J_ASSERT(handle != NULL);
1029
1030 /*
1031 * Now that we do not always journal data, we should
1032 * keep in mind whether this should always journal the
1033 * new buffer as metadata. For now, regular file
1034 * writes use ext3_get_block instead, so it's not a
1035 * problem.
1036 */
1037 lock_buffer(bh);
1038 BUFFER_TRACE(bh, "call get_create_access");
1039 fatal = ext3_journal_get_create_access(handle, bh);
1040 if (!fatal && !buffer_uptodate(bh)) {
1041 memset(bh->b_data,0,inode->i_sb->s_blocksize);
1042 set_buffer_uptodate(bh);
1043 }
1044 unlock_buffer(bh);
1045 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
1046 err = ext3_journal_dirty_metadata(handle, bh);
1047 if (!fatal)
1048 fatal = err;
1049 } else {
1050 BUFFER_TRACE(bh, "not a new buffer");
1051 }
1052 if (fatal) {
1053 *errp = fatal;
1054 brelse(bh);
1055 bh = NULL;
1056 }
1057 return bh;
1058 }
1059 err:
1060 return NULL;
1061 }
1062
1063 struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
1064 int block, int create, int *err)
1065 {
1066 struct buffer_head * bh;
1067
1068 bh = ext3_getblk(handle, inode, block, create, err);
1069 if (!bh)
1070 return bh;
1071 if (buffer_uptodate(bh))
1072 return bh;
1073 ll_rw_block(READ_META, 1, &bh);
1074 wait_on_buffer(bh);
1075 if (buffer_uptodate(bh))
1076 return bh;
1077 put_bh(bh);
1078 *err = -EIO;
1079 return NULL;
1080 }
1081
1082 static int walk_page_buffers( handle_t *handle,
1083 struct buffer_head *head,
1084 unsigned from,
1085 unsigned to,
1086 int *partial,
1087 int (*fn)( handle_t *handle,
1088 struct buffer_head *bh))
1089 {
1090 struct buffer_head *bh;
1091 unsigned block_start, block_end;
1092 unsigned blocksize = head->b_size;
1093 int err, ret = 0;
1094 struct buffer_head *next;
1095
1096 for ( bh = head, block_start = 0;
1097 ret == 0 && (bh != head || !block_start);
1098 block_start = block_end, bh = next)
1099 {
1100 next = bh->b_this_page;
1101 block_end = block_start + blocksize;
1102 if (block_end <= from || block_start >= to) {
1103 if (partial && !buffer_uptodate(bh))
1104 *partial = 1;
1105 continue;
1106 }
1107 err = (*fn)(handle, bh);
1108 if (!ret)
1109 ret = err;
1110 }
1111 return ret;
1112 }
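/*
 * Descriptive note: walk_page_buffers() applies @fn to each buffer of
 * @head's page that overlaps the byte range [@from, @to); buffers
 * entirely outside the range only contribute to *@partial (set when such
 * a buffer is not uptodate). Callers below use it with
 * do_journal_get_write_access, journal_dirty_data_fn, write_end_fn and
 * bget_one/bput_one.
 */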
1113
1114 /*
1115 * To preserve ordering, it is essential that the hole instantiation and
1116 * the data write be encapsulated in a single transaction. We cannot
1117 * close off a transaction and start a new one between the ext3_get_block()
1118 * and the commit_write(). So doing the journal_start at the start of
1119 * prepare_write() is the right place.
1120 *
1121 * Also, this function can nest inside ext3_writepage() ->
1122 * block_write_full_page(). In that case, we *know* that ext3_writepage()
1123 * has generated enough buffer credits to do the whole page. So we won't
1124 * block on the journal in that case, which is good, because the caller may
1125 * be PF_MEMALLOC.
1126 *
1127 * By accident, ext3 can be reentered when a transaction is open via
1128 * quota file writes. If we were to commit the transaction while thus
1129 * reentered, there can be a deadlock - we would be holding a quota
1130 * lock, and the commit would never complete if another thread had a
1131 * transaction open and was blocking on the quota lock - a ranking
1132 * violation.
1133 *
1134 * So what we do is to rely on the fact that journal_stop/journal_start
1135 * will _not_ run commit under these circumstances because handle->h_ref
1136 * is elevated. We'll still have enough credits for the tiny quotafile
1137 * write.
1138 */
1139 static int do_journal_get_write_access(handle_t *handle,
1140 struct buffer_head *bh)
1141 {
1142 if (!buffer_mapped(bh) || buffer_freed(bh))
1143 return 0;
1144 return ext3_journal_get_write_access(handle, bh);
1145 }
1146
1147 static int ext3_write_begin(struct file *file, struct address_space *mapping,
1148 loff_t pos, unsigned len, unsigned flags,
1149 struct page **pagep, void **fsdata)
1150 {
1151 struct inode *inode = mapping->host;
1152 int ret;
1153 handle_t *handle;
1154 int retries = 0;
1155 struct page *page;
1156 pgoff_t index;
1157 unsigned from, to;
1158 /* Reserve one block more for addition to orphan list in case
1159 * we allocate blocks but write fails for some reason */
1160 int needed_blocks = ext3_writepage_trans_blocks(inode) + 1;
1161
1162 index = pos >> PAGE_CACHE_SHIFT;
1163 from = pos & (PAGE_CACHE_SIZE - 1);
1164 to = from + len;
1165
1166 retry:
1167 page = grab_cache_page_write_begin(mapping, index, flags);
1168 if (!page)
1169 return -ENOMEM;
1170 *pagep = page;
1171
1172 handle = ext3_journal_start(inode, needed_blocks);
1173 if (IS_ERR(handle)) {
1174 unlock_page(page);
1175 page_cache_release(page);
1176 ret = PTR_ERR(handle);
1177 goto out;
1178 }
1179 ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
1180 ext3_get_block);
1181 if (ret)
1182 goto write_begin_failed;
1183
1184 if (ext3_should_journal_data(inode)) {
1185 ret = walk_page_buffers(handle, page_buffers(page),
1186 from, to, NULL, do_journal_get_write_access);
1187 }
1188 write_begin_failed:
1189 if (ret) {
1190 /*
1191 * block_write_begin may have instantiated a few blocks
1192 * outside i_size. Trim these off again. Don't need
1193 * i_size_read because we hold i_mutex.
1194 *
1195 * Add inode to orphan list in case we crash before truncate
1196 * finishes.
1197 */
1198 if (pos + len > inode->i_size)
1199 ext3_orphan_add(handle, inode);
1200 ext3_journal_stop(handle);
1201 unlock_page(page);
1202 page_cache_release(page);
1203 if (pos + len > inode->i_size)
1204 vmtruncate(inode, inode->i_size);
1205 }
1206 if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
1207 goto retry;
1208 out:
1209 return ret;
1210 }
1211
1212
1213 int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1214 {
1215 int err = journal_dirty_data(handle, bh);
1216 if (err)
1217 ext3_journal_abort_handle(__func__, __func__,
1218 bh, handle, err);
1219 return err;
1220 }
1221
1222 /* For ordered writepage and write_end functions */
1223 static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1224 {
1225 /*
1226 * Write could have mapped the buffer but it didn't copy the data in
1227 * yet. So avoid filing such a buffer into a transaction.
1228 */
1229 if (buffer_mapped(bh) && buffer_uptodate(bh))
1230 return ext3_journal_dirty_data(handle, bh);
1231 return 0;
1232 }
1233
1234 /* For write_end() in data=journal mode */
1235 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1236 {
1237 if (!buffer_mapped(bh) || buffer_freed(bh))
1238 return 0;
1239 set_buffer_uptodate(bh);
1240 return ext3_journal_dirty_metadata(handle, bh);
1241 }
1242
1243 /*
1244 * This is nasty and subtle: ext3_write_begin() could have allocated blocks
1245 * for the whole page but later we failed to copy the data in. Update inode
1246 * size according to what we managed to copy. The rest is going to be
1247 * truncated in write_end function.
1248 */
1249 static void update_file_sizes(struct inode *inode, loff_t pos, unsigned copied)
1250 {
1251 /* What matters to us is i_disksize. We don't write i_size anywhere */
1252 if (pos + copied > inode->i_size)
1253 i_size_write(inode, pos + copied);
1254 if (pos + copied > EXT3_I(inode)->i_disksize) {
1255 EXT3_I(inode)->i_disksize = pos + copied;
1256 mark_inode_dirty(inode);
1257 }
1258 }
1259
1260 /*
1261 * We need to pick up the new inode size which generic_commit_write gave us.
1262 * `file' can be NULL - eg, when called from page_symlink().
1263 *
1264 * ext3 never places buffers on inode->i_mapping->private_list. Metadata
1265 * buffers are managed internally.
1266 */
1267 static int ext3_ordered_write_end(struct file *file,
1268 struct address_space *mapping,
1269 loff_t pos, unsigned len, unsigned copied,
1270 struct page *page, void *fsdata)
1271 {
1272 handle_t *handle = ext3_journal_current_handle();
1273 struct inode *inode = file->f_mapping->host;
1274 unsigned from, to;
1275 int ret = 0, ret2;
1276
1277 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1278
1279 from = pos & (PAGE_CACHE_SIZE - 1);
1280 to = from + copied;
1281 ret = walk_page_buffers(handle, page_buffers(page),
1282 from, to, NULL, journal_dirty_data_fn);
1283
1284 if (ret == 0)
1285 update_file_sizes(inode, pos, copied);
1286 /*
1287 * There may be allocated blocks outside of i_size because
1288 * we failed to copy some data. Prepare for truncate.
1289 */
1290 if (pos + len > inode->i_size)
1291 ext3_orphan_add(handle, inode);
1292 ret2 = ext3_journal_stop(handle);
1293 if (!ret)
1294 ret = ret2;
1295 unlock_page(page);
1296 page_cache_release(page);
1297
1298 if (pos + len > inode->i_size)
1299 vmtruncate(inode, inode->i_size);
1300 return ret ? ret : copied;
1301 }
1302
1303 static int ext3_writeback_write_end(struct file *file,
1304 struct address_space *mapping,
1305 loff_t pos, unsigned len, unsigned copied,
1306 struct page *page, void *fsdata)
1307 {
1308 handle_t *handle = ext3_journal_current_handle();
1309 struct inode *inode = file->f_mapping->host;
1310 int ret;
1311
1312 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1313 update_file_sizes(inode, pos, copied);
1314 /*
1315 * There may be allocated blocks outside of i_size because
1316 * we failed to copy some data. Prepare for truncate.
1317 */
1318 if (pos + len > inode->i_size)
1319 ext3_orphan_add(handle, inode);
1320 ret = ext3_journal_stop(handle);
1321 unlock_page(page);
1322 page_cache_release(page);
1323
1324 if (pos + len > inode->i_size)
1325 vmtruncate(inode, inode->i_size);
1326 return ret ? ret : copied;
1327 }
1328
1329 static int ext3_journalled_write_end(struct file *file,
1330 struct address_space *mapping,
1331 loff_t pos, unsigned len, unsigned copied,
1332 struct page *page, void *fsdata)
1333 {
1334 handle_t *handle = ext3_journal_current_handle();
1335 struct inode *inode = mapping->host;
1336 int ret = 0, ret2;
1337 int partial = 0;
1338 unsigned from, to;
1339
1340 from = pos & (PAGE_CACHE_SIZE - 1);
1341 to = from + len;
1342
1343 if (copied < len) {
1344 if (!PageUptodate(page))
1345 copied = 0;
1346 page_zero_new_buffers(page, from + copied, to);
1347 to = from + copied;
1348 }
1349
1350 ret = walk_page_buffers(handle, page_buffers(page), from,
1351 to, &partial, write_end_fn);
1352 if (!partial)
1353 SetPageUptodate(page);
1354
1355 if (pos + copied > inode->i_size)
1356 i_size_write(inode, pos + copied);
1357 /*
1358 * There may be allocated blocks outside of i_size because
1359 * we failed to copy some data. Prepare for truncate.
1360 */
1361 if (pos + len > inode->i_size)
1362 ext3_orphan_add(handle, inode);
1363 EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
1364 if (inode->i_size > EXT3_I(inode)->i_disksize) {
1365 EXT3_I(inode)->i_disksize = inode->i_size;
1366 ret2 = ext3_mark_inode_dirty(handle, inode);
1367 if (!ret)
1368 ret = ret2;
1369 }
1370
1371 ret2 = ext3_journal_stop(handle);
1372 if (!ret)
1373 ret = ret2;
1374 unlock_page(page);
1375 page_cache_release(page);
1376
1377 if (pos + len > inode->i_size)
1378 vmtruncate(inode, inode->i_size);
1379 return ret ? ret : copied;
1380 }
1381
1382 /*
1383 * bmap() is special. It gets used by applications such as lilo and by
1384 * the swapper to find the on-disk block of a specific piece of data.
1385 *
1386 * Naturally, this is dangerous if the block concerned is still in the
1387 * journal. If somebody makes a swapfile on an ext3 data-journaling
1388 * filesystem and enables swap, then they may get a nasty shock when the
1389 * data getting swapped to that swapfile suddenly gets overwritten by
1390 * the original zeros written out previously to the journal and
1391 * awaiting writeback in the kernel's buffer cache.
1392 *
1393 * So, if we see any bmap calls here on a modified, data-journaled file,
1394 * take extra steps to flush any blocks which might be in the cache.
1395 */
1396 static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
1397 {
1398 struct inode *inode = mapping->host;
1399 journal_t *journal;
1400 int err;
1401
1402 if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
1403 /*
1404 * This is a REALLY heavyweight approach, but the use of
1405 * bmap on dirty files is expected to be extremely rare:
1406 * only if we run lilo or swapon on a freshly made file
1407 * do we expect this to happen.
1408 *
1409 * (bmap requires CAP_SYS_RAWIO so this does not
1410 * represent an unprivileged user DOS attack --- we'd be
1411 * in trouble if mortal users could trigger this path at
1412 * will.)
1413 *
1414 * NB. EXT3_STATE_JDATA is not set on files other than
1415 * regular files. If somebody wants to bmap a directory
1416 * or symlink and gets confused because the buffer
1417 * hasn't yet been flushed to disk, they deserve
1418 * everything they get.
1419 */
1420
1421 EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
1422 journal = EXT3_JOURNAL(inode);
1423 journal_lock_updates(journal);
1424 err = journal_flush(journal);
1425 journal_unlock_updates(journal);
1426
1427 if (err)
1428 return 0;
1429 }
1430
1431 return generic_block_bmap(mapping,block,ext3_get_block);
1432 }
1433
1434 static int bget_one(handle_t *handle, struct buffer_head *bh)
1435 {
1436 get_bh(bh);
1437 return 0;
1438 }
1439
1440 static int bput_one(handle_t *handle, struct buffer_head *bh)
1441 {
1442 put_bh(bh);
1443 return 0;
1444 }
1445
1446 static int buffer_unmapped(handle_t *handle, struct buffer_head *bh)
1447 {
1448 return !buffer_mapped(bh);
1449 }
1450
1451 /*
1452 * Note that we always start a transaction even if we're not journalling
1453 * data. This is to preserve ordering: any hole instantiation within
1454 * __block_write_full_page -> ext3_get_block() should be journalled
1455 * along with the data so we don't crash and then get metadata which
1456 * refers to old data.
1457 *
1458 * In all journalling modes block_write_full_page() will start the I/O.
1459 *
1460 * Problem:
1461 *
1462 * ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1463 * ext3_writepage()
1464 *
1465 * Similar for:
1466 *
1467 * ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
1468 *
1469 * Same applies to ext3_get_block(). We will deadlock on various things like
1470 * lock_journal and i_truncate_mutex.
1471 *
1472 * Setting PF_MEMALLOC here doesn't work - too many internal memory
1473 * allocations fail.
1474 *
1475 * 16May01: If we're reentered then journal_current_handle() will be
1476 * non-zero. We simply *return*.
1477 *
1478 * 1 July 2001: @@@ FIXME:
1479 * In journalled data mode, a data buffer may be metadata against the
1480 * current transaction. But the same file is part of a shared mapping
1481 * and someone does a writepage() on it.
1482 *
1483 * We will move the buffer onto the async_data list, but *after* it has
1484 * been dirtied. So there's a small window where we have dirty data on
1485 * BJ_Metadata.
1486 *
1487 * Note that this only applies to the last partial page in the file. The
1488 * bit which block_write_full_page() uses prepare/commit for. (That's
1489 * broken code anyway: it's wrong for msync()).
1490 *
1491 * It's a rare case: affects the final partial page, for journalled data
1492 * where the file is subject to both write() and writepage() in the same
1493 * transaction. To fix it we'll need a custom block_write_full_page().
1494 * We'll probably need that anyway for journalling writepage() output.
1495 *
1496 * We don't honour synchronous mounts for writepage(). That would be
1497 * disastrous. Any write() or metadata operation will sync the fs for
1498 * us.
1499 *
1500 * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
1501 * we don't need to open a transaction here.
1502 */
1503 static int ext3_ordered_writepage(struct page *page,
1504 struct writeback_control *wbc)
1505 {
1506 struct inode *inode = page->mapping->host;
1507 struct buffer_head *page_bufs;
1508 handle_t *handle = NULL;
1509 int ret = 0;
1510 int err;
1511
1512 J_ASSERT(PageLocked(page));
1513
1514 /*
1515 * We give up here if we're reentered, because it might be for a
1516 * different filesystem.
1517 */
1518 if (ext3_journal_current_handle())
1519 goto out_fail;
1520
1521 if (!page_has_buffers(page)) {
1522 create_empty_buffers(page, inode->i_sb->s_blocksize,
1523 (1 << BH_Dirty)|(1 << BH_Uptodate));
1524 page_bufs = page_buffers(page);
1525 } else {
1526 page_bufs = page_buffers(page);
1527 if (!walk_page_buffers(NULL, page_bufs, 0, PAGE_CACHE_SIZE,
1528 NULL, buffer_unmapped)) {
1529 /* Provide NULL get_block() to catch bugs if buffers
1530 * weren't really mapped */
1531 return block_write_full_page(page, NULL, wbc);
1532 }
1533 }
1534 handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1535
1536 if (IS_ERR(handle)) {
1537 ret = PTR_ERR(handle);
1538 goto out_fail;
1539 }
1540
1541 walk_page_buffers(handle, page_bufs, 0,
1542 PAGE_CACHE_SIZE, NULL, bget_one);
1543
1544 ret = block_write_full_page(page, ext3_get_block, wbc);
1545
1546 /*
1547 * The page can become unlocked at any point now, and
1548 * truncate can then come in and change things. So we
1549 * can't touch *page from now on. But *page_bufs is
1550 * safe due to elevated refcount.
1551 */
1552
1553 /*
1554 * And attach them to the current transaction. But only if
1555 * block_write_full_page() succeeded. Otherwise they are unmapped,
1556 * and generally junk.
1557 */
1558 if (ret == 0) {
1559 err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
1560 NULL, journal_dirty_data_fn);
1561 if (!ret)
1562 ret = err;
1563 }
1564 walk_page_buffers(handle, page_bufs, 0,
1565 PAGE_CACHE_SIZE, NULL, bput_one);
1566 err = ext3_journal_stop(handle);
1567 if (!ret)
1568 ret = err;
1569 return ret;
1570
1571 out_fail:
1572 redirty_page_for_writepage(wbc, page);
1573 unlock_page(page);
1574 return ret;
1575 }
1576
1577 static int ext3_writeback_writepage(struct page *page,
1578 struct writeback_control *wbc)
1579 {
1580 struct inode *inode = page->mapping->host;
1581 handle_t *handle = NULL;
1582 int ret = 0;
1583 int err;
1584
1585 if (ext3_journal_current_handle())
1586 goto out_fail;
1587
1588 if (page_has_buffers(page)) {
1589 if (!walk_page_buffers(NULL, page_buffers(page), 0,
1590 PAGE_CACHE_SIZE, NULL, buffer_unmapped)) {
1591 /* Provide NULL get_block() to catch bugs if buffers
1592 * weren't really mapped */
1593 return block_write_full_page(page, NULL, wbc);
1594 }
1595 }
1596
1597 handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1598 if (IS_ERR(handle)) {
1599 ret = PTR_ERR(handle);
1600 goto out_fail;
1601 }
1602
1603 if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode))
1604 ret = nobh_writepage(page, ext3_get_block, wbc);
1605 else
1606 ret = block_write_full_page(page, ext3_get_block, wbc);
1607
1608 err = ext3_journal_stop(handle);
1609 if (!ret)
1610 ret = err;
1611 return ret;
1612
1613 out_fail:
1614 redirty_page_for_writepage(wbc, page);
1615 unlock_page(page);
1616 return ret;
1617 }
1618
1619 static int ext3_journalled_writepage(struct page *page,
1620 struct writeback_control *wbc)
1621 {
1622 struct inode *inode = page->mapping->host;
1623 handle_t *handle = NULL;
1624 int ret = 0;
1625 int err;
1626
1627 if (ext3_journal_current_handle())
1628 goto no_write;
1629
1630 handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1631 if (IS_ERR(handle)) {
1632 ret = PTR_ERR(handle);
1633 goto no_write;
1634 }
1635
1636 if (!page_has_buffers(page) || PageChecked(page)) {
1637 /*
1638 * It's mmapped pagecache. Add buffers and journal it. There
1639 * doesn't seem much point in redirtying the page here.
1640 */
1641 ClearPageChecked(page);
1642 ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
1643 ext3_get_block);
1644 if (ret != 0) {
1645 ext3_journal_stop(handle);
1646 goto out_unlock;
1647 }
1648 ret = walk_page_buffers(handle, page_buffers(page), 0,
1649 PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
1650
1651 err = walk_page_buffers(handle, page_buffers(page), 0,
1652 PAGE_CACHE_SIZE, NULL, write_end_fn);
1653 if (ret == 0)
1654 ret = err;
1655 EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
1656 unlock_page(page);
1657 } else {
1658 /*
1659 * It may be a page full of checkpoint-mode buffers. We don't
1660 * really know unless we go poke around in the buffer_heads.
1661 * But block_write_full_page will do the right thing.
1662 */
1663 ret = block_write_full_page(page, ext3_get_block, wbc);
1664 }
1665 err = ext3_journal_stop(handle);
1666 if (!ret)
1667 ret = err;
1668 out:
1669 return ret;
1670
1671 no_write:
1672 redirty_page_for_writepage(wbc, page);
1673 out_unlock:
1674 unlock_page(page);
1675 goto out;
1676 }
1677
1678 static int ext3_readpage(struct file *file, struct page *page)
1679 {
1680 return mpage_readpage(page, ext3_get_block);
1681 }
1682
1683 static int
1684 ext3_readpages(struct file *file, struct address_space *mapping,
1685 struct list_head *pages, unsigned nr_pages)
1686 {
1687 return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
1688 }
1689
1690 static void ext3_invalidatepage(struct page *page, unsigned long offset)
1691 {
1692 journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1693
1694 /*
1695 * If it's a full truncate we just forget about the pending dirtying
1696 */
1697 if (offset == 0)
1698 ClearPageChecked(page);
1699
1700 journal_invalidatepage(journal, page, offset);
1701 }
1702
1703 static int ext3_releasepage(struct page *page, gfp_t wait)
1704 {
1705 journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1706
1707 WARN_ON(PageChecked(page));
1708 if (!page_has_buffers(page))
1709 return 0;
1710 return journal_try_to_free_buffers(journal, page, wait);
1711 }
1712
1713 /*
1714 * If the O_DIRECT write will extend the file then add this inode to the
1715 * orphan list. So recovery will truncate it back to the original size
1716 * if the machine crashes during the write.
1717 *
1718 * If the O_DIRECT write is instantiating holes inside i_size and the machine
1719 * crashes then stale disk data _may_ be exposed inside the file. But current
1720 * VFS code falls back to the buffered path in that case so we are safe.
1721 */
1722 static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1723 const struct iovec *iov, loff_t offset,
1724 unsigned long nr_segs)
1725 {
1726 struct file *file = iocb->ki_filp;
1727 struct inode *inode = file->f_mapping->host;
1728 struct ext3_inode_info *ei = EXT3_I(inode);
1729 handle_t *handle;
1730 ssize_t ret;
1731 int orphan = 0;
1732 size_t count = iov_length(iov, nr_segs);
1733
1734 if (rw == WRITE) {
1735 loff_t final_size = offset + count;
1736
1737 if (final_size > inode->i_size) {
1738 /* Credits for sb + inode write */
1739 handle = ext3_journal_start(inode, 2);
1740 if (IS_ERR(handle)) {
1741 ret = PTR_ERR(handle);
1742 goto out;
1743 }
1744 ret = ext3_orphan_add(handle, inode);
1745 if (ret) {
1746 ext3_journal_stop(handle);
1747 goto out;
1748 }
1749 orphan = 1;
1750 ei->i_disksize = inode->i_size;
1751 ext3_journal_stop(handle);
1752 }
1753 }
1754
1755 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
1756 offset, nr_segs,
1757 ext3_get_block, NULL);
1758
1759 if (orphan) {
1760 int err;
1761
1762 /* Credits for sb + inode write */
1763 handle = ext3_journal_start(inode, 2);
1764 if (IS_ERR(handle)) {
1765 /* This is really bad luck. We've written the data
1766 * but cannot extend i_size. Bail out and pretend
1767 * the write failed... */
1768 ret = PTR_ERR(handle);
1769 goto out;
1770 }
1771 if (inode->i_nlink)
1772 ext3_orphan_del(handle, inode);
1773 if (ret > 0) {
1774 loff_t end = offset + ret;
1775 if (end > inode->i_size) {
1776 ei->i_disksize = end;
1777 i_size_write(inode, end);
1778 /*
1779 * We're going to return a positive `ret'
1780 * here due to non-zero-length I/O, so there's
1781 * no way of reporting error returns from
1782 * ext3_mark_inode_dirty() to userspace. So
1783 * ignore it.
1784 */
1785 ext3_mark_inode_dirty(handle, inode);
1786 }
1787 }
1788 err = ext3_journal_stop(handle);
1789 if (ret == 0)
1790 ret = err;
1791 }
1792 out:
1793 return ret;
1794 }
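/*
 * Worked example of the orphan protection above (editor's illustration,
 * not part of the original source): with i_size at 4096, an O_DIRECT
 * write of 8192 bytes at offset 4096 gives final_size = 12288 > i_size,
 * so the inode is put on the orphan list before any data is written.
 * A crash mid-write is then harmless: recovery truncates the file back
 * to i_disksize (4096), hiding any partially written tail.  On success
 * the second transaction drops the orphan record and moves i_size and
 * i_disksize up to 12288.
 */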
1795
1796 /*
1797 * Pages can be marked dirty completely asynchronously from ext3's journalling
1798 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
1799 * much here because ->set_page_dirty is called under VFS locks. The page is
1800 * not necessarily locked.
1801 *
1802 * We cannot just dirty the page and leave attached buffers clean, because the
1803 * buffers' dirty state is "definitive". We cannot just set the buffers dirty
1804 * or jbddirty because all the journalling code will explode.
1805 *
1806 * So what we do is to mark the page "pending dirty" and next time writepage
1807 * is called, propagate that into the buffers appropriately.
1808 */
1809 static int ext3_journalled_set_page_dirty(struct page *page)
1810 {
1811 SetPageChecked(page);
1812 return __set_page_dirty_nobuffers(page);
1813 }
1814
1815 static const struct address_space_operations ext3_ordered_aops = {
1816 .readpage = ext3_readpage,
1817 .readpages = ext3_readpages,
1818 .writepage = ext3_ordered_writepage,
1819 .sync_page = block_sync_page,
1820 .write_begin = ext3_write_begin,
1821 .write_end = ext3_ordered_write_end,
1822 .bmap = ext3_bmap,
1823 .invalidatepage = ext3_invalidatepage,
1824 .releasepage = ext3_releasepage,
1825 .direct_IO = ext3_direct_IO,
1826 .migratepage = buffer_migrate_page,
1827 .is_partially_uptodate = block_is_partially_uptodate,
1828 };
1829
1830 static const struct address_space_operations ext3_writeback_aops = {
1831 .readpage = ext3_readpage,
1832 .readpages = ext3_readpages,
1833 .writepage = ext3_writeback_writepage,
1834 .sync_page = block_sync_page,
1835 .write_begin = ext3_write_begin,
1836 .write_end = ext3_writeback_write_end,
1837 .bmap = ext3_bmap,
1838 .invalidatepage = ext3_invalidatepage,
1839 .releasepage = ext3_releasepage,
1840 .direct_IO = ext3_direct_IO,
1841 .migratepage = buffer_migrate_page,
1842 .is_partially_uptodate = block_is_partially_uptodate,
1843 };
1844
1845 static const struct address_space_operations ext3_journalled_aops = {
1846 .readpage = ext3_readpage,
1847 .readpages = ext3_readpages,
1848 .writepage = ext3_journalled_writepage,
1849 .sync_page = block_sync_page,
1850 .write_begin = ext3_write_begin,
1851 .write_end = ext3_journalled_write_end,
1852 .set_page_dirty = ext3_journalled_set_page_dirty,
1853 .bmap = ext3_bmap,
1854 .invalidatepage = ext3_invalidatepage,
1855 .releasepage = ext3_releasepage,
1856 .is_partially_uptodate = block_is_partially_uptodate,
1857 };
1858
1859 void ext3_set_aops(struct inode *inode)
1860 {
1861 if (ext3_should_order_data(inode))
1862 inode->i_mapping->a_ops = &ext3_ordered_aops;
1863 else if (ext3_should_writeback_data(inode))
1864 inode->i_mapping->a_ops = &ext3_writeback_aops;
1865 else
1866 inode->i_mapping->a_ops = &ext3_journalled_aops;
1867 }
1868
1869 /*
1870 * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
1871 * up to the end of the block which corresponds to `from'.
1872 * This is required during truncate. We need to physically zero the tail end
1873 * of that block so it doesn't yield old data if the file is later grown.
1874 */
1875 static int ext3_block_truncate_page(handle_t *handle, struct page *page,
1876 struct address_space *mapping, loff_t from)
1877 {
1878 ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
1879 unsigned offset = from & (PAGE_CACHE_SIZE-1);
1880 unsigned blocksize, iblock, length, pos;
1881 struct inode *inode = mapping->host;
1882 struct buffer_head *bh;
1883 int err = 0;
1884
1885 blocksize = inode->i_sb->s_blocksize;
1886 length = blocksize - (offset & (blocksize - 1));
1887 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
1888
1889 /*
1890 * For the "nobh" option, we can only work if we don't need to
1891 * read in the page - otherwise we create buffers to do the IO.
1892 */
1893 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
1894 ext3_should_writeback_data(inode) && PageUptodate(page)) {
1895 zero_user(page, offset, length);
1896 set_page_dirty(page);
1897 goto unlock;
1898 }
1899
1900 if (!page_has_buffers(page))
1901 create_empty_buffers(page, blocksize, 0);
1902
1903 /* Find the buffer that contains "offset" */
1904 bh = page_buffers(page);
1905 pos = blocksize;
1906 while (offset >= pos) {
1907 bh = bh->b_this_page;
1908 iblock++;
1909 pos += blocksize;
1910 }
1911
1912 err = 0;
1913 if (buffer_freed(bh)) {
1914 BUFFER_TRACE(bh, "freed: skip");
1915 goto unlock;
1916 }
1917
1918 if (!buffer_mapped(bh)) {
1919 BUFFER_TRACE(bh, "unmapped");
1920 ext3_get_block(inode, iblock, bh, 0);
1921 /* unmapped? It's a hole - nothing to do */
1922 if (!buffer_mapped(bh)) {
1923 BUFFER_TRACE(bh, "still unmapped");
1924 goto unlock;
1925 }
1926 }
1927
1928 /* Ok, it's mapped. Make sure it's up-to-date */
1929 if (PageUptodate(page))
1930 set_buffer_uptodate(bh);
1931
1932 if (!buffer_uptodate(bh)) {
1933 err = -EIO;
1934 ll_rw_block(READ, 1, &bh);
1935 wait_on_buffer(bh);
1936 /* Uhhuh. Read error. Complain and punt. */
1937 if (!buffer_uptodate(bh))
1938 goto unlock;
1939 }
1940
1941 if (ext3_should_journal_data(inode)) {
1942 BUFFER_TRACE(bh, "get write access");
1943 err = ext3_journal_get_write_access(handle, bh);
1944 if (err)
1945 goto unlock;
1946 }
1947
1948 zero_user(page, offset, length);
1949 BUFFER_TRACE(bh, "zeroed end of block");
1950
1951 err = 0;
1952 if (ext3_should_journal_data(inode)) {
1953 err = ext3_journal_dirty_metadata(handle, bh);
1954 } else {
1955 if (ext3_should_order_data(inode))
1956 err = ext3_journal_dirty_data(handle, bh);
1957 mark_buffer_dirty(bh);
1958 }
1959
1960 unlock:
1961 unlock_page(page);
1962 page_cache_release(page);
1963 return err;
1964 }
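/*
 * Worked example (editor's illustration): with a 4096-byte block size,
 * truncating a file to 5000 bytes gives offset = 5000 & 4095 = 904 and
 * length = 4096 - (904 & 4095) = 3192, so bytes 904..4095 of the block
 * containing the new EOF are zeroed.  When the new size is already a
 * block multiple, ext3_truncate() passes a NULL page and this function
 * is never called.
 */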
1965
1966 /*
1967 * Probably it should be a library function... search for the first non-zero
1968 * word or memcmp with zero_page, whichever is better for the particular
1969 * architecture. Linus?
1970 */
1971 static inline int all_zeroes(__le32 *p, __le32 *q)
1972 {
1973 while (p < q)
1974 if (*p++)
1975 return 0;
1976 return 1;
1977 }
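/*
 * A memcmp-based variant in the spirit of the note above. An editor's
 * sketch only (hence the #if 0): it assumes p..q never spans more than
 * one fs block, so a PAGE_SIZE scratch buffer suffices (ext3 requires
 * blocksize <= PAGE_SIZE).
 */
#if 0
static inline int all_zeroes_memcmp(__le32 *p, __le32 *q)
{
	static const char zeroes[PAGE_SIZE];	/* implicitly zero-filled */

	return !memcmp(p, zeroes, (char *)q - (char *)p);
}
#endif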
1978
1979 /**
1980 * ext3_find_shared - find the indirect blocks for partial truncation.
1981 * @inode: inode in question
1982 * @depth: depth of the affected branch
1983 * @offsets: offsets of pointers in that branch (see ext3_block_to_path)
1984 * @chain: place to store the pointers to partial indirect blocks
1985 * @top: place to the (detached) top of branch
1986 *
1987 * This is a helper function used by ext3_truncate().
1988 *
1989 * When we do truncate() we may have to clean the ends of several
1990 * indirect blocks but leave the blocks themselves alive. Block is
1991 * partially truncated if some data below the new i_size is referenced
1992 * from it (and it is on the path to the first completely truncated
1993 * data block, indeed). We have to free the top of that path along
1994 * with everything to the right of the path. Since no allocation
1995 * past the truncation point is possible until ext3_truncate()
1996 * finishes, we may safely do the latter, but top of branch may
1997 * require special attention - pageout below the truncation point
1998 * might try to populate it.
1999 *
2000 * We atomically detach the top of branch from the tree, store the
2001 * block number of its root in *@top, pointers to buffer_heads of
2002 * partially truncated blocks - in @chain[].bh and pointers to
2003 * their last elements that should not be removed - in
2004 * @chain[].p. Return value is the pointer to last filled element
2005 * of @chain.
2006 *
2007 * The caller is left to do the actual freeing of subtrees:
2008 * a) free the subtree starting from *@top
2009 * b) free the subtrees whose roots are stored in
2010 * (@chain[i].p+1 .. end of @chain[i].bh->b_data)
2011 * c) free the subtrees growing from the inode past the @chain[0].
2012 * (no partially truncated stuff there). */
2013
2014 static Indirect *ext3_find_shared(struct inode *inode, int depth,
2015 int offsets[4], Indirect chain[4], __le32 *top)
2016 {
2017 Indirect *partial, *p;
2018 int k, err;
2019
2020 *top = 0;
2021 /* Make k index the deepest non-null offset + 1 */
2022 for (k = depth; k > 1 && !offsets[k-1]; k--)
2023 ;
2024 partial = ext3_get_branch(inode, k, offsets, chain, &err);
2025 /* Writer: pointers */
2026 if (!partial)
2027 partial = chain + k-1;
2028 /*
2029 * If the branch has acquired a continuation since we last looked -
2030 * fine, it should all survive and the (new) top doesn't belong to us.
2031 */
2032 if (!partial->key && *partial->p)
2033 /* Writer: end */
2034 goto no_top;
2035 for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
2036 ;
2037 /*
2038 * OK, we've found the last block that must survive. The rest of our
2039 * branch should be detached before unlocking. However, if that rest
2040 * of branch is all ours and does not grow immediately from the inode
2041 * it's easier to cheat and just decrement partial->p.
2042 */
2043 if (p == chain + k - 1 && p > chain) {
2044 p->p--;
2045 } else {
2046 *top = *p->p;
2047 /* Nope, don't do this in ext3. Must leave the tree intact */
2048 #if 0
2049 *p->p = 0;
2050 #endif
2051 }
2052 /* Writer: end */
2053
2054 while(partial > p) {
2055 brelse(partial->bh);
2056 partial--;
2057 }
2058 no_top:
2059 return partial;
2060 }
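/*
 * Editor's example of the k computation above: for a depth-3 (double
 * indirect) path with offsets = {EXT3_DIND_BLOCK, 5, 0}, the loop stops
 * with k = 2 because offsets[2] is zero, i.e. the truncation point
 * falls on the very first pointer of a second-level indirect block.
 * Only the top two levels can then be shared with surviving data; the
 * whole subtree below offsets[1] is doomed wholesale and needs no
 * partial handling.
 */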
2061
2062 /*
2063 * Zero a number of block pointers in either an inode or an indirect block.
2064 * If we restart the transaction we must again get write access to the
2065 * indirect block for further modification.
2066 *
2067 * We release `count' blocks on disk, but (last - first) may be greater
2068 * than `count' because there can be holes in there.
2069 */
2070 static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
2071 struct buffer_head *bh, ext3_fsblk_t block_to_free,
2072 unsigned long count, __le32 *first, __le32 *last)
2073 {
2074 __le32 *p;
2075 if (try_to_extend_transaction(handle, inode)) {
2076 if (bh) {
2077 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
2078 ext3_journal_dirty_metadata(handle, bh);
2079 }
2080 ext3_mark_inode_dirty(handle, inode);
2081 ext3_journal_test_restart(handle, inode);
2082 if (bh) {
2083 BUFFER_TRACE(bh, "retaking write access");
2084 ext3_journal_get_write_access(handle, bh);
2085 }
2086 }
2087
2088 /*
2089 * Any buffers which are on the journal will be in memory. We find
2090 * them on the hash table so journal_revoke() will run journal_forget()
2091 * on them. We've already detached each block from the file, so
2092 * bforget() in journal_forget() should be safe.
2093 *
2094 * AKPM: turn on bforget in journal_forget()!!!
2095 */
2096 for (p = first; p < last; p++) {
2097 u32 nr = le32_to_cpu(*p);
2098 if (nr) {
2099 struct buffer_head *bh;
2100
2101 *p = 0;
2102 bh = sb_find_get_block(inode->i_sb, nr);
2103 ext3_forget(handle, 0, inode, bh, nr);
2104 }
2105 }
2106
2107 ext3_free_blocks(handle, inode, block_to_free, count);
2108 }
2109
2110 /**
2111 * ext3_free_data - free a list of data blocks
2112 * @handle: handle for this transaction
2113 * @inode: inode we are dealing with
2114 * @this_bh: indirect buffer_head which contains *@first and *@last
2115 * @first: array of block numbers
2116 * @last: points immediately past the end of array
2117 *
2118 * We are freeing all blocks referenced from that array (numbers are stored as
2119 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
2120 *
2121 * We accumulate contiguous runs of blocks to free. Conveniently, if these
2122 * blocks are contiguous then releasing them at one time will only affect one
2123 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
2124 * actually use a lot of journal space.
2125 *
2126 * @this_bh will be %NULL if @first and @last point into the inode's direct
2127 * block pointers.
2128 */
2129 static void ext3_free_data(handle_t *handle, struct inode *inode,
2130 struct buffer_head *this_bh,
2131 __le32 *first, __le32 *last)
2132 {
2133 ext3_fsblk_t block_to_free = 0; /* Starting block # of a run */
2134 unsigned long count = 0; /* Number of blocks in the run */
2135 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
2136 corresponding to
2137 block_to_free */
2138 ext3_fsblk_t nr; /* Current block # */
2139 __le32 *p; /* Pointer into inode/ind
2140 for current block */
2141 int err;
2142
2143 if (this_bh) { /* For indirect block */
2144 BUFFER_TRACE(this_bh, "get_write_access");
2145 err = ext3_journal_get_write_access(handle, this_bh);
2146 /* Important: if we can't update the indirect pointers
2147 * to the blocks, we can't free them. */
2148 if (err)
2149 return;
2150 }
2151
2152 for (p = first; p < last; p++) {
2153 nr = le32_to_cpu(*p);
2154 if (nr) {
2155 /* accumulate blocks to free if they're contiguous */
2156 if (count == 0) {
2157 block_to_free = nr;
2158 block_to_free_p = p;
2159 count = 1;
2160 } else if (nr == block_to_free + count) {
2161 count++;
2162 } else {
2163 ext3_clear_blocks(handle, inode, this_bh,
2164 block_to_free,
2165 count, block_to_free_p, p);
2166 block_to_free = nr;
2167 block_to_free_p = p;
2168 count = 1;
2169 }
2170 }
2171 }
2172
2173 if (count > 0)
2174 ext3_clear_blocks(handle, inode, this_bh, block_to_free,
2175 count, block_to_free_p, p);
2176
2177 if (this_bh) {
2178 BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
2179
2180 /*
2181 * The buffer head should have an attached journal head at this
2182 * point. However, if the data is corrupted and an indirect
2183 * block pointed to itself, it would have been detached when
2184 * the block was cleared. Check for this instead of OOPSing.
2185 */
2186 if (bh2jh(this_bh))
2187 ext3_journal_dirty_metadata(handle, this_bh);
2188 else
2189 ext3_error(inode->i_sb, "ext3_free_data",
2190 "circular indirect block detected, "
2191 "inode=%lu, block=%llu",
2192 inode->i_ino,
2193 (unsigned long long)this_bh->b_blocknr);
2194 }
2195 }
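/*
 * Editor's illustration of the run batching above: given block
 * pointers {100, 101, 102, 0, 200}, the loop accumulates the first
 * three into one ext3_clear_blocks(..., 100, 3, ...) call, skips the
 * hole, then starts a fresh run; the call after the loop releases the
 * final single-block run {200}.
 */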
2196
2197 /**
2198 * ext3_free_branches - free an array of branches
2199 * @handle: JBD handle for this transaction
2200 * @inode: inode we are dealing with
2201 * @parent_bh: the buffer_head which contains *@first and *@last
2202 * @first: array of block numbers
2203 * @last: pointer immediately past the end of array
2204 * @depth: depth of the branches to free
2205 *
2206 * We are freeing all blocks referenced from these branches (numbers are
2207 * stored as little-endian 32-bit) and updating @inode->i_blocks
2208 * appropriately.
2209 */
2210 static void ext3_free_branches(handle_t *handle, struct inode *inode,
2211 struct buffer_head *parent_bh,
2212 __le32 *first, __le32 *last, int depth)
2213 {
2214 ext3_fsblk_t nr;
2215 __le32 *p;
2216
2217 if (is_handle_aborted(handle))
2218 return;
2219
2220 if (depth--) {
2221 struct buffer_head *bh;
2222 int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2223 p = last;
2224 while (--p >= first) {
2225 nr = le32_to_cpu(*p);
2226 if (!nr)
2227 continue; /* A hole */
2228
2229 /* Go read the buffer for the next level down */
2230 bh = sb_bread(inode->i_sb, nr);
2231
2232 /*
2233 * A read failure? Report error and clear slot
2234 * (should be rare).
2235 */
2236 if (!bh) {
2237 ext3_error(inode->i_sb, "ext3_free_branches",
2238 "Read failure, inode=%lu, block="E3FSBLK,
2239 inode->i_ino, nr);
2240 continue;
2241 }
2242
2243 /* This zaps the entire block. Bottom up. */
2244 BUFFER_TRACE(bh, "free child branches");
2245 ext3_free_branches(handle, inode, bh,
2246 (__le32*)bh->b_data,
2247 (__le32*)bh->b_data + addr_per_block,
2248 depth);
2249
2250 /*
2251 * We've probably journalled the indirect block several
2252 * times during the truncate. But it's no longer
2253 * needed and we now drop it from the transaction via
2254 * journal_revoke().
2255 *
2256 * That's easy if it's exclusively part of this
2257 * transaction. But if it's part of the committing
2258 * transaction then journal_forget() will simply
2259 * brelse() it. That means that if the underlying
2260 * block is reallocated in ext3_get_block(),
2261 * unmap_underlying_metadata() will find this block
2262 * and will try to get rid of it. damn, damn.
2263 *
2264 * If this block has already been committed to the
2265 * journal, a revoke record will be written. And
2266 * revoke records must be emitted *before* clearing
2267 * this block's bit in the bitmaps.
2268 */
2269 ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
2270
2271 /*
2272 * Everything below this pointer has been
2273 * released. Now let this top-of-subtree go.
2274 *
2275 * We want the freeing of this indirect block to be
2276 * atomic in the journal with the updating of the
2277 * bitmap block which owns it. So make some room in
2278 * the journal.
2279 *
2280 * We zero the parent pointer *after* freeing its
2281 * pointee in the bitmaps, so if extend_transaction()
2282 * for some reason fails to put the bitmap changes and
2283 * the release into the same transaction, recovery
2284 * will merely complain about releasing a free block,
2285 * rather than leaking blocks.
2286 */
2287 if (is_handle_aborted(handle))
2288 return;
2289 if (try_to_extend_transaction(handle, inode)) {
2290 ext3_mark_inode_dirty(handle, inode);
2291 ext3_journal_test_restart(handle, inode);
2292 }
2293
2294 ext3_free_blocks(handle, inode, nr, 1);
2295
2296 if (parent_bh) {
2297 /*
2298 * The block which we have just freed is
2299 * pointed to by an indirect block: journal it
2300 */
2301 BUFFER_TRACE(parent_bh, "get_write_access");
2302 if (!ext3_journal_get_write_access(handle,
2303 parent_bh)){
2304 *p = 0;
2305 BUFFER_TRACE(parent_bh,
2306 "call ext3_journal_dirty_metadata");
2307 ext3_journal_dirty_metadata(handle,
2308 parent_bh);
2309 }
2310 }
2311 }
2312 } else {
2313 /* We have reached the bottom of the tree. */
2314 BUFFER_TRACE(parent_bh, "free data blocks");
2315 ext3_free_data(handle, inode, parent_bh, first, last);
2316 }
2317 }
2318
2319 int ext3_can_truncate(struct inode *inode)
2320 {
2321 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2322 return 0;
2323 if (S_ISREG(inode->i_mode))
2324 return 1;
2325 if (S_ISDIR(inode->i_mode))
2326 return 1;
2327 if (S_ISLNK(inode->i_mode))
2328 return !ext3_inode_is_fast_symlink(inode);
2329 return 0;
2330 }
2331
2332 /*
2333 * ext3_truncate()
2334 *
2335 * We block out ext3_get_block() block instantiations across the entire
2336 * transaction, and VFS/VM ensures that ext3_truncate() cannot run
2337 * simultaneously on behalf of the same inode.
2338 *
2339 * As we work through the truncate and commit bits of it to the journal there
2340 * is one core, guiding principle: the file's tree must always be consistent on
2341 * disk. We must be able to restart the truncate after a crash.
2342 *
2343 * The file's tree may be transiently inconsistent in memory (although it
2344 * probably isn't), but whenever we close off and commit a journal transaction,
2345 * the contents of (the filesystem + the journal) must be consistent and
2346 * restartable. It's pretty simple, really: bottom up, right to left (although
2347 * left-to-right works OK too).
2348 *
2349 * Note that at recovery time, journal replay occurs *before* the restart of
2350 * truncate against the orphan inode list.
2351 *
2352 * The committed inode has the new, desired i_size (which is the same as
2353 * i_disksize in this case). After a crash, ext3_orphan_cleanup() will see
2354 * that this inode's truncate did not complete and it will again call
2355 * ext3_truncate() to have another go. So there will be instantiated blocks
2356 * to the right of the truncation point in a crashed ext3 filesystem. But
2357 * that's fine - as long as they are linked from the inode, the post-crash
2358 * ext3_truncate() run will find them and release them.
2359 */
2360 void ext3_truncate(struct inode *inode)
2361 {
2362 handle_t *handle;
2363 struct ext3_inode_info *ei = EXT3_I(inode);
2364 __le32 *i_data = ei->i_data;
2365 int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2366 struct address_space *mapping = inode->i_mapping;
2367 int offsets[4];
2368 Indirect chain[4];
2369 Indirect *partial;
2370 __le32 nr = 0;
2371 int n;
2372 long last_block;
2373 unsigned blocksize = inode->i_sb->s_blocksize;
2374 struct page *page;
2375
2376 if (!ext3_can_truncate(inode))
2377 goto out_notrans;
2378
2379 if (inode->i_size == 0 && ext3_should_writeback_data(inode))
2380 ei->i_state |= EXT3_STATE_FLUSH_ON_CLOSE;
2381
2382 /*
2383 * We have to lock the EOF page here, because lock_page() nests
2384 * outside journal_start().
2385 */
2386 if ((inode->i_size & (blocksize - 1)) == 0) {
2387 /* Block boundary? Nothing to do */
2388 page = NULL;
2389 } else {
2390 page = grab_cache_page(mapping,
2391 inode->i_size >> PAGE_CACHE_SHIFT);
2392 if (!page)
2393 goto out_notrans;
2394 }
2395
2396 handle = start_transaction(inode);
2397 if (IS_ERR(handle)) {
2398 if (page) {
2399 clear_highpage(page);
2400 flush_dcache_page(page);
2401 unlock_page(page);
2402 page_cache_release(page);
2403 }
2404 goto out_notrans;
2405 }
2406
2407 last_block = (inode->i_size + blocksize-1)
2408 >> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
2409
2410 if (page)
2411 ext3_block_truncate_page(handle, page, mapping, inode->i_size);
2412
2413 n = ext3_block_to_path(inode, last_block, offsets, NULL);
2414 if (n == 0)
2415 goto out_stop; /* error */
2416
2417 /*
2418 * OK. This truncate is going to happen. We add the inode to the
2419 * orphan list, so that if this truncate spans multiple transactions,
2420 * and we crash, we will resume the truncate when the filesystem
2421 * recovers. It also marks the inode dirty, to catch the new size.
2422 *
2423 * Implication: the file must always be in a sane, consistent
2424 * truncatable state while each transaction commits.
2425 */
2426 if (ext3_orphan_add(handle, inode))
2427 goto out_stop;
2428
2429 /*
2430 * The orphan list entry will now protect us from any crash which
2431 * occurs before the truncate completes, so it is now safe to propagate
2432 * the new, shorter inode size (held for now in i_size) into the
2433 * on-disk inode. We do this via i_disksize, which is the value which
2434 * ext3 *really* writes onto the disk inode.
2435 */
2436 ei->i_disksize = inode->i_size;
2437
2438 /*
2439 * From here we block out all ext3_get_block() callers who want to
2440 * modify the block allocation tree.
2441 */
2442 mutex_lock(&ei->truncate_mutex);
2443
2444 if (n == 1) { /* direct blocks */
2445 ext3_free_data(handle, inode, NULL, i_data+offsets[0],
2446 i_data + EXT3_NDIR_BLOCKS);
2447 goto do_indirects;
2448 }
2449
2450 partial = ext3_find_shared(inode, n, offsets, chain, &nr);
2451 /* Kill the top of shared branch (not detached) */
2452 if (nr) {
2453 if (partial == chain) {
2454 /* Shared branch grows from the inode */
2455 ext3_free_branches(handle, inode, NULL,
2456 &nr, &nr+1, (chain+n-1) - partial);
2457 *partial->p = 0;
2458 /*
2459 * We mark the inode dirty prior to restart,
2460 * and prior to stop. No need for it here.
2461 */
2462 } else {
2463 /* Shared branch grows from an indirect block */
2464 BUFFER_TRACE(partial->bh, "get_write_access");
2465 ext3_free_branches(handle, inode, partial->bh,
2466 partial->p,
2467 partial->p+1, (chain+n-1) - partial);
2468 }
2469 }
2470 /* Clear the ends of indirect blocks on the shared branch */
2471 while (partial > chain) {
2472 ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
2473 (__le32*)partial->bh->b_data+addr_per_block,
2474 (chain+n-1) - partial);
2475 BUFFER_TRACE(partial->bh, "call brelse");
2476 brelse (partial->bh);
2477 partial--;
2478 }
2479 do_indirects:
2480 /* Kill the remaining (whole) subtrees */
2481 switch (offsets[0]) {
2482 default:
2483 nr = i_data[EXT3_IND_BLOCK];
2484 if (nr) {
2485 ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
2486 i_data[EXT3_IND_BLOCK] = 0;
2487 }
2488 case EXT3_IND_BLOCK:
2489 nr = i_data[EXT3_DIND_BLOCK];
2490 if (nr) {
2491 ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
2492 i_data[EXT3_DIND_BLOCK] = 0;
2493 }
2494 case EXT3_DIND_BLOCK:
2495 nr = i_data[EXT3_TIND_BLOCK];
2496 if (nr) {
2497 ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
2498 i_data[EXT3_TIND_BLOCK] = 0;
2499 }
2500 case EXT3_TIND_BLOCK:
2501 ;
2502 }
2503
2504 ext3_discard_reservation(inode);
2505
2506 mutex_unlock(&ei->truncate_mutex);
2507 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
2508 ext3_mark_inode_dirty(handle, inode);
2509
2510 /*
2511 * In a multi-transaction truncate, we only make the final transaction
2512 * synchronous
2513 */
2514 if (IS_SYNC(inode))
2515 handle->h_sync = 1;
2516 out_stop:
2517 /*
2518 * If this was a simple ftruncate(), and the file will remain alive
2519 * then we need to clear up the orphan record which we created above.
2520 * However, if this was a real unlink then we were called by
2521 * ext3_delete_inode(), and we allow that function to clean up the
2522 * orphan info for us.
2523 */
2524 if (inode->i_nlink)
2525 ext3_orphan_del(handle, inode);
2526
2527 ext3_journal_stop(handle);
2528 return;
2529 out_notrans:
2530 /*
2531 * Delete the inode from the orphan list so that it doesn't stay there
2532 * forever and trigger an assertion on umount.
2533 */
2534 if (inode->i_nlink)
2535 ext3_orphan_del(NULL, inode);
2536 }
2537
2538 static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
2539 unsigned long ino, struct ext3_iloc *iloc)
2540 {
2541 unsigned long block_group;
2542 unsigned long offset;
2543 ext3_fsblk_t block;
2544 struct ext3_group_desc *gdp;
2545
2546 if (!ext3_valid_inum(sb, ino)) {
2547 /*
2548 * This error is already checked for in namei.c unless we are
2549 * looking at an NFS filehandle, in which case no error
2550 * report is needed
2551 */
2552 return 0;
2553 }
2554
2555 block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
2556 gdp = ext3_get_group_desc(sb, block_group, NULL);
2557 if (!gdp)
2558 return 0;
2559 /*
2560 * Figure out the offset within the block group inode table
2561 */
2562 offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
2563 EXT3_INODE_SIZE(sb);
2564 block = le32_to_cpu(gdp->bg_inode_table) +
2565 (offset >> EXT3_BLOCK_SIZE_BITS(sb));
2566
2567 iloc->block_group = block_group;
2568 iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
2569 return block;
2570 }
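/*
 * Worked example of the arithmetic above (editor's illustration,
 * assuming 4K blocks, 128-byte inodes and 8192 inodes per group): for
 * ino 10000, block_group = (10000 - 1) / 8192 = 1 and offset =
 * ((10000 - 1) % 8192) * 128 = 1807 * 128 = 231296, so the inode sits
 * 231296 >> 12 = 56 blocks into that group's inode table, at byte
 * 231296 & 4095 = 1920 within the block.
 */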
2571
2572 /*
2573 * ext3_get_inode_loc returns with an extra refcount against the inode's
2574 * underlying buffer_head on success. If 'in_mem' is true, we have all
2575 * data in memory that is needed to recreate the on-disk version of this
2576 * inode.
2577 */
2578 static int __ext3_get_inode_loc(struct inode *inode,
2579 struct ext3_iloc *iloc, int in_mem)
2580 {
2581 ext3_fsblk_t block;
2582 struct buffer_head *bh;
2583
2584 block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2585 if (!block)
2586 return -EIO;
2587
2588 bh = sb_getblk(inode->i_sb, block);
2589 if (!bh) {
2590 ext3_error (inode->i_sb, "ext3_get_inode_loc",
2591 "unable to read inode block - "
2592 "inode=%lu, block="E3FSBLK,
2593 inode->i_ino, block);
2594 return -EIO;
2595 }
2596 if (!buffer_uptodate(bh)) {
2597 lock_buffer(bh);
2598
2599 /*
2600 * If the buffer has the write error flag, we have failed
2601 * to write out another inode in the same block. In this
2602 * case, we don't have to read the block because we may
2603 * read the old inode data successfully.
2604 */
2605 if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
2606 set_buffer_uptodate(bh);
2607
2608 if (buffer_uptodate(bh)) {
2609 /* someone brought it uptodate while we waited */
2610 unlock_buffer(bh);
2611 goto has_buffer;
2612 }
2613
2614 /*
2615 * If we have all of the inode's information in memory and this
2616 * is the only valid inode in the block, we need not read the
2617 * block.
2618 */
2619 if (in_mem) {
2620 struct buffer_head *bitmap_bh;
2621 struct ext3_group_desc *desc;
2622 int inodes_per_buffer;
2623 int inode_offset, i;
2624 int block_group;
2625 int start;
2626
2627 block_group = (inode->i_ino - 1) /
2628 EXT3_INODES_PER_GROUP(inode->i_sb);
2629 inodes_per_buffer = bh->b_size /
2630 EXT3_INODE_SIZE(inode->i_sb);
2631 inode_offset = ((inode->i_ino - 1) %
2632 EXT3_INODES_PER_GROUP(inode->i_sb));
2633 start = inode_offset & ~(inodes_per_buffer - 1);
2634
2635 /* Is the inode bitmap in cache? */
2636 desc = ext3_get_group_desc(inode->i_sb,
2637 block_group, NULL);
2638 if (!desc)
2639 goto make_io;
2640
2641 bitmap_bh = sb_getblk(inode->i_sb,
2642 le32_to_cpu(desc->bg_inode_bitmap));
2643 if (!bitmap_bh)
2644 goto make_io;
2645
2646 /*
2647 * If the inode bitmap isn't in cache then the
2648 * optimisation may end up performing two reads instead
2649 * of one, so skip it.
2650 */
2651 if (!buffer_uptodate(bitmap_bh)) {
2652 brelse(bitmap_bh);
2653 goto make_io;
2654 }
2655 for (i = start; i < start + inodes_per_buffer; i++) {
2656 if (i == inode_offset)
2657 continue;
2658 if (ext3_test_bit(i, bitmap_bh->b_data))
2659 break;
2660 }
2661 brelse(bitmap_bh);
2662 if (i == start + inodes_per_buffer) {
2663 /* all other inodes are free, so skip I/O */
2664 memset(bh->b_data, 0, bh->b_size);
2665 set_buffer_uptodate(bh);
2666 unlock_buffer(bh);
2667 goto has_buffer;
2668 }
2669 }
2670
2671 make_io:
2672 /*
2673 * There are other valid inodes in the buffer, this inode
2674 * has in-inode xattrs, or we don't have this inode in memory.
2675 * Read the block from disk.
2676 */
2677 get_bh(bh);
2678 bh->b_end_io = end_buffer_read_sync;
2679 submit_bh(READ_META, bh);
2680 wait_on_buffer(bh);
2681 if (!buffer_uptodate(bh)) {
2682 ext3_error(inode->i_sb, "ext3_get_inode_loc",
2683 "unable to read inode block - "
2684 "inode=%lu, block="E3FSBLK,
2685 inode->i_ino, block);
2686 brelse(bh);
2687 return -EIO;
2688 }
2689 }
2690 has_buffer:
2691 iloc->bh = bh;
2692 return 0;
2693 }
2694
2695 int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
2696 {
2697 /* We have all inode data except xattrs in memory here. */
2698 return __ext3_get_inode_loc(inode, iloc,
2699 !(EXT3_I(inode)->i_state & EXT3_STATE_XATTR));
2700 }
2701
2702 void ext3_set_inode_flags(struct inode *inode)
2703 {
2704 unsigned int flags = EXT3_I(inode)->i_flags;
2705
2706 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2707 if (flags & EXT3_SYNC_FL)
2708 inode->i_flags |= S_SYNC;
2709 if (flags & EXT3_APPEND_FL)
2710 inode->i_flags |= S_APPEND;
2711 if (flags & EXT3_IMMUTABLE_FL)
2712 inode->i_flags |= S_IMMUTABLE;
2713 if (flags & EXT3_NOATIME_FL)
2714 inode->i_flags |= S_NOATIME;
2715 if (flags & EXT3_DIRSYNC_FL)
2716 inode->i_flags |= S_DIRSYNC;
2717 }
2718
2719 /* Propagate flags from i_flags to EXT3_I(inode)->i_flags */
2720 void ext3_get_inode_flags(struct ext3_inode_info *ei)
2721 {
2722 unsigned int flags = ei->vfs_inode.i_flags;
2723
2724 ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
2725 EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
2726 if (flags & S_SYNC)
2727 ei->i_flags |= EXT3_SYNC_FL;
2728 if (flags & S_APPEND)
2729 ei->i_flags |= EXT3_APPEND_FL;
2730 if (flags & S_IMMUTABLE)
2731 ei->i_flags |= EXT3_IMMUTABLE_FL;
2732 if (flags & S_NOATIME)
2733 ei->i_flags |= EXT3_NOATIME_FL;
2734 if (flags & S_DIRSYNC)
2735 ei->i_flags |= EXT3_DIRSYNC_FL;
2736 }
2737
2738 struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
2739 {
2740 struct ext3_iloc iloc;
2741 struct ext3_inode *raw_inode;
2742 struct ext3_inode_info *ei;
2743 struct buffer_head *bh;
2744 struct inode *inode;
2745 long ret;
2746 int block;
2747
2748 inode = iget_locked(sb, ino);
2749 if (!inode)
2750 return ERR_PTR(-ENOMEM);
2751 if (!(inode->i_state & I_NEW))
2752 return inode;
2753
2754 ei = EXT3_I(inode);
2755 ei->i_block_alloc_info = NULL;
2756
2757 ret = __ext3_get_inode_loc(inode, &iloc, 0);
2758 if (ret < 0)
2759 goto bad_inode;
2760 bh = iloc.bh;
2761 raw_inode = ext3_raw_inode(&iloc);
2762 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2763 inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2764 inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2765 if(!(test_opt (inode->i_sb, NO_UID32))) {
2766 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2767 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2768 }
2769 inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
2770 inode->i_size = le32_to_cpu(raw_inode->i_size);
2771 inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
2772 inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
2773 inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
2774 inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
2775
2776 ei->i_state = 0;
2777 ei->i_dir_start_lookup = 0;
2778 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2779 /* We now have enough fields to check if the inode was active or not.
2780 * This is needed because nfsd might try to access dead inodes
2781 * the test is the same one that e2fsck uses
2782 * NeilBrown 1999oct15
2783 */
2784 if (inode->i_nlink == 0) {
2785 if (inode->i_mode == 0 ||
2786 !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
2787 /* this inode is deleted */
2788 brelse (bh);
2789 ret = -ESTALE;
2790 goto bad_inode;
2791 }
2792 /* The only unlinked inodes we let through here have
2793 * valid i_mode and are being read by the orphan
2794 * recovery code: that's fine, we're about to complete
2795 * the process of deleting those. */
2796 }
2797 inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
2798 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2799 #ifdef EXT3_FRAGMENTS
2800 ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
2801 ei->i_frag_no = raw_inode->i_frag;
2802 ei->i_frag_size = raw_inode->i_fsize;
2803 #endif
2804 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
2805 if (!S_ISREG(inode->i_mode)) {
2806 ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
2807 } else {
2808 inode->i_size |=
2809 ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
2810 }
2811 ei->i_disksize = inode->i_size;
2812 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2813 ei->i_block_group = iloc.block_group;
2814 /*
2815 * NOTE! The in-memory inode i_data array is in little-endian order
2816 * even on big-endian machines: we do NOT byteswap the block numbers!
2817 */
2818 for (block = 0; block < EXT3_N_BLOCKS; block++)
2819 ei->i_data[block] = raw_inode->i_block[block];
2820 INIT_LIST_HEAD(&ei->i_orphan);
2821
2822 if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
2823 EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
2824 /*
2825 * When mke2fs creates big inodes it does not zero out
2826 * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE,
2827 * so ignore those first few inodes.
2828 */
2829 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
2830 if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
2831 EXT3_INODE_SIZE(inode->i_sb)) {
2832 brelse (bh);
2833 ret = -EIO;
2834 goto bad_inode;
2835 }
2836 if (ei->i_extra_isize == 0) {
2837 /* The extra space is currently unused. Use it. */
2838 ei->i_extra_isize = sizeof(struct ext3_inode) -
2839 EXT3_GOOD_OLD_INODE_SIZE;
2840 } else {
2841 __le32 *magic = (void *)raw_inode +
2842 EXT3_GOOD_OLD_INODE_SIZE +
2843 ei->i_extra_isize;
2844 if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
2845 ei->i_state |= EXT3_STATE_XATTR;
2846 }
2847 } else
2848 ei->i_extra_isize = 0;
2849
2850 if (S_ISREG(inode->i_mode)) {
2851 inode->i_op = &ext3_file_inode_operations;
2852 inode->i_fop = &ext3_file_operations;
2853 ext3_set_aops(inode);
2854 } else if (S_ISDIR(inode->i_mode)) {
2855 inode->i_op = &ext3_dir_inode_operations;
2856 inode->i_fop = &ext3_dir_operations;
2857 } else if (S_ISLNK(inode->i_mode)) {
2858 if (ext3_inode_is_fast_symlink(inode)) {
2859 inode->i_op = &ext3_fast_symlink_inode_operations;
2860 nd_terminate_link(ei->i_data, inode->i_size,
2861 sizeof(ei->i_data) - 1);
2862 } else {
2863 inode->i_op = &ext3_symlink_inode_operations;
2864 ext3_set_aops(inode);
2865 }
2866 } else {
2867 inode->i_op = &ext3_special_inode_operations;
2868 if (raw_inode->i_block[0])
2869 init_special_inode(inode, inode->i_mode,
2870 old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
2871 else
2872 init_special_inode(inode, inode->i_mode,
2873 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
2874 }
2875 brelse (iloc.bh);
2876 ext3_set_inode_flags(inode);
2877 unlock_new_inode(inode);
2878 return inode;
2879
2880 bad_inode:
2881 iget_failed(inode);
2882 return ERR_PTR(ret);
2883 }
2884
2885 /*
2886 * Post the struct inode info into an on-disk inode location in the
2887 * buffer-cache. This gobbles the caller's reference to the
2888 * buffer_head in the inode location struct.
2889 *
2890 * The caller must have write access to iloc->bh.
2891 */
2892 static int ext3_do_update_inode(handle_t *handle,
2893 struct inode *inode,
2894 struct ext3_iloc *iloc)
2895 {
2896 struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
2897 struct ext3_inode_info *ei = EXT3_I(inode);
2898 struct buffer_head *bh = iloc->bh;
2899 int err = 0, rc, block;
2900
2901 /* For fields not tracked in the in-memory inode,
2902 * initialise them to zero for new inodes. */
2903 if (ei->i_state & EXT3_STATE_NEW)
2904 memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
2905
2906 ext3_get_inode_flags(ei);
2907 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
2908 if(!(test_opt(inode->i_sb, NO_UID32))) {
2909 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
2910 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
2911 /*
2912 * Fix up interoperability with old kernels. Otherwise, old inodes get
2913 * re-used with the upper 16 bits of the uid/gid intact
2914 */
2915 if(!ei->i_dtime) {
2916 raw_inode->i_uid_high =
2917 cpu_to_le16(high_16_bits(inode->i_uid));
2918 raw_inode->i_gid_high =
2919 cpu_to_le16(high_16_bits(inode->i_gid));
2920 } else {
2921 raw_inode->i_uid_high = 0;
2922 raw_inode->i_gid_high = 0;
2923 }
2924 } else {
2925 raw_inode->i_uid_low =
2926 cpu_to_le16(fs_high2lowuid(inode->i_uid));
2927 raw_inode->i_gid_low =
2928 cpu_to_le16(fs_high2lowgid(inode->i_gid));
2929 raw_inode->i_uid_high = 0;
2930 raw_inode->i_gid_high = 0;
2931 }
2932 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
2933 raw_inode->i_size = cpu_to_le32(ei->i_disksize);
2934 raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
2935 raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
2936 raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
2937 raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
2938 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
2939 raw_inode->i_flags = cpu_to_le32(ei->i_flags);
2940 #ifdef EXT3_FRAGMENTS
2941 raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
2942 raw_inode->i_frag = ei->i_frag_no;
2943 raw_inode->i_fsize = ei->i_frag_size;
2944 #endif
2945 raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
2946 if (!S_ISREG(inode->i_mode)) {
2947 raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
2948 } else {
2949 raw_inode->i_size_high =
2950 cpu_to_le32(ei->i_disksize >> 32);
2951 if (ei->i_disksize > 0x7fffffffULL) {
2952 struct super_block *sb = inode->i_sb;
2953 if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
2954 EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
2955 EXT3_SB(sb)->s_es->s_rev_level ==
2956 cpu_to_le32(EXT3_GOOD_OLD_REV)) {
2957 /* If this is the first large file
2958 * created, add a flag to the superblock.
2959 */
2960 err = ext3_journal_get_write_access(handle,
2961 EXT3_SB(sb)->s_sbh);
2962 if (err)
2963 goto out_brelse;
2964 ext3_update_dynamic_rev(sb);
2965 EXT3_SET_RO_COMPAT_FEATURE(sb,
2966 EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
2967 handle->h_sync = 1;
2968 err = ext3_journal_dirty_metadata(handle,
2969 EXT3_SB(sb)->s_sbh);
2970 }
2971 }
2972 }
2973 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
2974 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
2975 if (old_valid_dev(inode->i_rdev)) {
2976 raw_inode->i_block[0] =
2977 cpu_to_le32(old_encode_dev(inode->i_rdev));
2978 raw_inode->i_block[1] = 0;
2979 } else {
2980 raw_inode->i_block[0] = 0;
2981 raw_inode->i_block[1] =
2982 cpu_to_le32(new_encode_dev(inode->i_rdev));
2983 raw_inode->i_block[2] = 0;
2984 }
2985 } else for (block = 0; block < EXT3_N_BLOCKS; block++)
2986 raw_inode->i_block[block] = ei->i_data[block];
2987
2988 if (ei->i_extra_isize)
2989 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
2990
2991 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
2992 rc = ext3_journal_dirty_metadata(handle, bh);
2993 if (!err)
2994 err = rc;
2995 ei->i_state &= ~EXT3_STATE_NEW;
2996
2997 out_brelse:
2998 brelse (bh);
2999 ext3_std_error(inode->i_sb, err);
3000 return err;
3001 }
3002
3003 /*
3004 * ext3_write_inode()
3005 *
3006 * We are called from a few places:
3007 *
3008 * - Within generic_file_write() for O_SYNC files.
3009 * Here, there will be no transaction running. We wait for any running
3010 * transaction to commit.
3011 *
3012 * - Within sys_sync(), kupdate and such.
3013 * We wait on commit, if told to.
3014 *
3015 * - Within prune_icache() (PF_MEMALLOC == true)
3016 * Here we simply return. We can't afford to block kswapd on the
3017 * journal commit.
3018 *
3019 * In all cases it is actually safe for us to return without doing anything,
3020 * because the inode has been copied into a raw inode buffer in
3021 * ext3_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
3022 * knfsd.
3023 *
3024 * Note that we are absolutely dependent upon all inode dirtiers doing the
3025 * right thing: they *must* call mark_inode_dirty() after dirtying info in
3026 * which we are interested.
3027 *
3028 * It would be a bug for them to not do this. The code:
3029 *
3030 * mark_inode_dirty(inode)
3031 * stuff();
3032 * inode->i_size = expr;
3033 *
3034 * is in error because a kswapd-driven write_inode() could occur while
3035 * `stuff()' is running, and the new i_size will be lost. Plus the inode
3036 * will no longer be on the superblock's dirty inode list.
3037 */
3038 int ext3_write_inode(struct inode *inode, int wait)
3039 {
3040 if (current->flags & PF_MEMALLOC)
3041 return 0;
3042
3043 if (ext3_journal_current_handle()) {
3044 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
3045 dump_stack();
3046 return -EIO;
3047 }
3048
3049 if (!wait)
3050 return 0;
3051
3052 return ext3_force_commit(inode->i_sb);
3053 }
3054
3055 /*
3056 * ext3_setattr()
3057 *
3058 * Called from notify_change.
3059 *
3060 * We want to trap VFS attempts to truncate the file as soon as
3061 * possible. In particular, we want to make sure that when the VFS
3062 * shrinks i_size, we put the inode on the orphan list and modify
3063 * i_disksize immediately, so that during the subsequent flushing of
3064 * dirty pages and freeing of disk blocks, we can guarantee that any
3065 * commit will leave the blocks being flushed in an unused state on
3066 * disk. (On recovery, the inode will get truncated and the blocks will
3067 * be freed, so we have a strong guarantee that no future commit will
3068 * leave these blocks visible to the user.)
3069 *
3070 * Called with inode->sem down.
3071 */
3072 int ext3_setattr(struct dentry *dentry, struct iattr *attr)
3073 {
3074 struct inode *inode = dentry->d_inode;
3075 int error, rc = 0;
3076 const unsigned int ia_valid = attr->ia_valid;
3077
3078 error = inode_change_ok(inode, attr);
3079 if (error)
3080 return error;
3081
3082 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
3083 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
3084 handle_t *handle;
3085
3086 /* (user+group)*(old+new) structure, inode write (sb,
3087 * inode block, ? - but truncate inode update has it) */
3088 handle = ext3_journal_start(inode, 2*(EXT3_QUOTA_INIT_BLOCKS(inode->i_sb)+
3089 EXT3_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
3090 if (IS_ERR(handle)) {
3091 error = PTR_ERR(handle);
3092 goto err_out;
3093 }
3094 error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
3095 if (error) {
3096 ext3_journal_stop(handle);
3097 return error;
3098 }
3099 /* Update corresponding info in inode so that everything is in
3100 * one transaction */
3101 if (attr->ia_valid & ATTR_UID)
3102 inode->i_uid = attr->ia_uid;
3103 if (attr->ia_valid & ATTR_GID)
3104 inode->i_gid = attr->ia_gid;
3105 error = ext3_mark_inode_dirty(handle, inode);
3106 ext3_journal_stop(handle);
3107 }
3108
3109 if (S_ISREG(inode->i_mode) &&
3110 attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
3111 handle_t *handle;
3112
3113 handle = ext3_journal_start(inode, 3);
3114 if (IS_ERR(handle)) {
3115 error = PTR_ERR(handle);
3116 goto err_out;
3117 }
3118
3119 error = ext3_orphan_add(handle, inode);
3120 EXT3_I(inode)->i_disksize = attr->ia_size;
3121 rc = ext3_mark_inode_dirty(handle, inode);
3122 if (!error)
3123 error = rc;
3124 ext3_journal_stop(handle);
3125 }
3126
3127 rc = inode_setattr(inode, attr);
3128
3129 if (!rc && (ia_valid & ATTR_MODE))
3130 rc = ext3_acl_chmod(inode);
3131
3132 err_out:
3133 ext3_std_error(inode->i_sb, error);
3134 if (!error)
3135 error = rc;
3136 return error;
3137 }
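/*
 * Editor's sketch of the shrink path described above, assuming
 * inode_setattr() still routes size changes through vmtruncate() and
 * the inode's ->truncate op: an ftruncate() from 1MB down to 100KB
 * first commits a transaction that both puts the inode on the orphan
 * list and sets i_disksize to 100KB; only then does inode_setattr() ->
 * vmtruncate() -> ext3_truncate() free the blocks, so a crash at any
 * point leaves the orphan record to finish the job on recovery.
 */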
3138
3139
3140 /*
3141 * How many blocks doth make a writepage()?
3142 *
3143 * With N blocks per page, it may be:
3144 * N data blocks
3145 * 2 indirect block
3146 * 2 dindirect
3147 * 1 tindirect
3148 * N+5 bitmap blocks (from the above)
3149 * N+5 group descriptor summary blocks
3150 * 1 inode block
3151 * 1 superblock.
3152 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
3153 *
3154 * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
3155 *
3156 * With ordered or writeback data it's the same, less the N data blocks.
3157 *
3158 * If the inode's direct blocks can hold an integral number of pages then a
3159 * page cannot straddle two indirect blocks, and we can only touch one indirect
3160 * and dindirect block, and the "5" above becomes "3".
3161 *
3162 * This still overestimates under most circumstances. If we were to pass the
3163 * start and end offsets in here as well we could do block_to_path() on each
3164 * block and work out the exact number of indirects which are touched. Pah.
3165 */
3166
3167 static int ext3_writepage_trans_blocks(struct inode *inode)
3168 {
3169 int bpp = ext3_journal_blocks_per_page(inode);
3170 int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
3171 int ret;
3172
3173 if (ext3_should_journal_data(inode))
3174 ret = 3 * (bpp + indirects) + 2;
3175 else
3176 ret = 2 * (bpp + indirects) + 2;
3177
3178 #ifdef CONFIG_QUOTA
3179 /* We know that structure was already allocated during vfs_dq_init so
3180 * we will be updating only the data blocks + inodes */
3181 ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb);
3182 #endif
3183
3184 return ret;
3185 }
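/*
 * Editor's example of the estimate above: with 4K pages on a 1K-block
 * filesystem, bpp = 4 and EXT3_NDIR_BLOCKS (12) % 4 == 0, so indirects
 * is 3.  A data-journalling writepage then reserves 3 * (4 + 3) + 2 =
 * 23 credits, ordered/writeback mode 2 * (4 + 3) + 2 = 16, plus the
 * quota blocks when CONFIG_QUOTA is enabled.
 */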
3186
3187 /*
3188 * The caller must have previously called ext3_reserve_inode_write().
3189 * Given this, we know that the caller already has write access to iloc->bh.
3190 */
3191 int ext3_mark_iloc_dirty(handle_t *handle,
3192 struct inode *inode, struct ext3_iloc *iloc)
3193 {
3194 int err = 0;
3195
3196 /* the do_update_inode consumes one bh->b_count */
3197 get_bh(iloc->bh);
3198
3199 /* ext3_do_update_inode() does journal_dirty_metadata */
3200 err = ext3_do_update_inode(handle, inode, iloc);
3201 put_bh(iloc->bh);
3202 return err;
3203 }
3204
3205 /*
3206 * On success, we end up with an outstanding reference count against
3207 * iloc->bh. This _must_ be cleaned up later.
3208 */
3209
3210 int
3211 ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
3212 struct ext3_iloc *iloc)
3213 {
3214 int err = 0;
3215 if (handle) {
3216 err = ext3_get_inode_loc(inode, iloc);
3217 if (!err) {
3218 BUFFER_TRACE(iloc->bh, "get_write_access");
3219 err = ext3_journal_get_write_access(handle, iloc->bh);
3220 if (err) {
3221 brelse(iloc->bh);
3222 iloc->bh = NULL;
3223 }
3224 }
3225 }
3226 ext3_std_error(inode->i_sb, err);
3227 return err;
3228 }
3229
3230 /*
3231 * What we do here is to mark the in-core inode as clean with respect to inode
3232 * dirtiness (it may still be data-dirty).
3233 * This means that the in-core inode may be reaped by prune_icache
3234 * without having to perform any I/O. This is a very good thing,
3235 * because *any* task may call prune_icache - even ones which
3236 * have a transaction open against a different journal.
3237 *
3238 * Is this cheating? Not really. Sure, we haven't written the
3239 * inode out, but prune_icache isn't a user-visible syncing function.
3240 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
3241 * we start and wait on commits.
3242 *
3243 * Is this efficient/effective? Well, we're being nice to the system
3244 * by cleaning up our inodes proactively so they can be reaped
3245 * without I/O. But we are potentially leaving up to five seconds'
3246 * worth of inodes floating about which prune_icache wants us to
3247 * write out. One way to fix that would be to get prune_icache()
3248 * to do a write_super() to free up some memory. It has the desired
3249 * effect.
3250 */
3251 int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
3252 {
3253 struct ext3_iloc iloc;
3254 int err;
3255
3256 might_sleep();
3257 err = ext3_reserve_inode_write(handle, inode, &iloc);
3258 if (!err)
3259 err = ext3_mark_iloc_dirty(handle, inode, &iloc);
3260 return err;
3261 }
3262
3263 /*
3264 * ext3_dirty_inode() is called from __mark_inode_dirty()
3265 *
3266 * We're really interested in the case where a file is being extended.
3267 * i_size has been changed by generic_commit_write() and we thus need
3268 * to include the updated inode in the current transaction.
3269 *
3270 * Also, vfs_dq_alloc_space() will always dirty the inode when blocks
3271 * are allocated to the file.
3272 *
3273 * If the inode is marked synchronous, we don't honour that here - doing
3274 * so would cause a commit on atime updates, which we don't bother doing.
3275 * We handle synchronous inodes at the highest possible level.
3276 */
3277 void ext3_dirty_inode(struct inode *inode)
3278 {
3279 handle_t *current_handle = ext3_journal_current_handle();
3280 handle_t *handle;
3281
3282 handle = ext3_journal_start(inode, 2);
3283 if (IS_ERR(handle))
3284 goto out;
3285 if (current_handle &&
3286 current_handle->h_transaction != handle->h_transaction) {
3287 /* This task has a transaction open against a different fs */
3288 printk(KERN_EMERG "%s: transactions do not match!\n",
3289 __func__);
3290 } else {
3291 jbd_debug(5, "marking dirty. outer handle=%p\n",
3292 current_handle);
3293 ext3_mark_inode_dirty(handle, inode);
3294 }
3295 ext3_journal_stop(handle);
3296 out:
3297 return;
3298 }
3299
3300 #if 0
3301 /*
3302 * Bind an inode's backing buffer_head into this transaction, to prevent
3303 * it from being flushed to disk early. Unlike
3304 * ext3_reserve_inode_write, this leaves behind no bh reference and
3305 * returns no iloc structure, so the caller needs to repeat the iloc
3306 * lookup to mark the inode dirty later.
3307 */
3308 static int ext3_pin_inode(handle_t *handle, struct inode *inode)
3309 {
3310 struct ext3_iloc iloc;
3311
3312 int err = 0;
3313 if (handle) {
3314 err = ext3_get_inode_loc(inode, &iloc);
3315 if (!err) {
3316 BUFFER_TRACE(iloc.bh, "get_write_access");
3317 err = journal_get_write_access(handle, iloc.bh);
3318 if (!err)
3319 err = ext3_journal_dirty_metadata(handle,
3320 iloc.bh);
3321 brelse(iloc.bh);
3322 }
3323 }
3324 ext3_std_error(inode->i_sb, err);
3325 return err;
3326 }
3327 #endif
3328
3329 int ext3_change_inode_journal_flag(struct inode *inode, int val)
3330 {
3331 journal_t *journal;
3332 handle_t *handle;
3333 int err;
3334
3335 /*
3336 * We have to be very careful here: changing a data block's
3337 * journaling status dynamically is dangerous. If we write a
3338 * data block to the journal, change the status and then delete
3339 * that block, we risk forgetting to revoke the old log record
3340 * from the journal and so a subsequent replay can corrupt data.
3341 * So, first we make sure that the journal is empty and that
3342 * nobody is changing anything.
3343 */
3344
3345 journal = EXT3_JOURNAL(inode);
3346 if (is_journal_aborted(journal))
3347 return -EROFS;
3348
3349 journal_lock_updates(journal);
3350 journal_flush(journal);
3351
3352 /*
3353 * OK, there are no updates running now, and all cached data is
3354 * synced to disk. We are now in a completely consistent state
3355 * which doesn't have anything in the journal, and we know that
3356 * no filesystem updates are running, so it is safe to modify
3357 * the inode's in-core data-journaling state flag now.
3358 */
3359
3360 if (val)
3361 EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
3362 else
3363 EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
3364 ext3_set_aops(inode);
3365
3366 journal_unlock_updates(journal);
3367
3368 /* Finally we can mark the inode as dirty. */
3369
3370 handle = ext3_journal_start(inode, 1);
3371 if (IS_ERR(handle))
3372 return PTR_ERR(handle);
3373
3374 err = ext3_mark_inode_dirty(handle, inode);
3375 handle->h_sync = 1;
3376 ext3_journal_stop(handle);
3377 ext3_std_error(inode->i_sb, err);
3378
3379 return err;
3380 }