/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 *  linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes that we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create);
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Restart the transaction associated with *handle. This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * the page cache has already been dropped and writes are blocked by
	 * i_mutex. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}
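
/*
 * A minimal sketch of how a truncate-style loop might use the restart
 * helper above: everything is dirtied against the running transaction,
 * and the handle is restarted when credits run low. The loop shape and
 * the free_next_chunk() helper are hypothetical; the credit helpers are
 * the ones used elsewhere in this file.
 *
 *	while (more_blocks_to_free) {
 *		if (!ext4_handle_has_enough_credits(handle, needed)) {
 *			err = ext4_truncate_restart_trans(handle, inode,
 *					ext4_blocks_for_truncate(inode));
 *			if (err)
 *				break;
 *		}
 *		free_next_chunk(inode, handle);		(hypothetical)
 *	}
 */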

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);

	ext4_ioend_wait(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data, dirty buffers are tracked only in the
		 * journal. So although mm thinks everything is clean and
		 * ready for reaping, the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss. Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped, and thus userspace could see stale data if it
		 * tries to read them before the transaction is checkpointed.
		 * So be careful and force everything to disk here... We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_log_start_commit(journal, commit_tid);
			jbd2_log_wait_commit(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Calculate the number of metadata blocks that need to be reserved
 * in order to allocate a block located at @lblock.
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_ind_calc_metadata_amount(inode, lblock);
}

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks\n",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
				  unsigned int line,
				  struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
	}
	return num;
}
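
/*
 * The scan above is the standard pagevec tag-lookup idiom. A minimal
 * sketch of the same pattern in isolation (inspect_page() is a
 * hypothetical placeholder):
 *
 *	pgoff_t index = start;
 *	struct pagevec pvec;
 *	int i, nr;
 *
 *	pagevec_init(&pvec, 0);
 *	while ((nr = pagevec_lookup_tag(&pvec, mapping, &index,
 *					PAGECACHE_TAG_DIRTY,
 *					(pgoff_t)PAGEVEC_SIZE))) {
 *		for (i = 0; i < nr; i++)
 *			inspect_page(pvec.pages[i]);
 *		pagevec_release(&pvec);
 *	}
 *
 * pagevec_lookup_tag() advances "index" past the last page it returned,
 * so the loop naturally walks forward through the mapping.
 */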

/*
 * Sets the BH_Da_Mapped bit on the buffer heads corresponding to the given map.
 */
static void set_buffers_da_mapped(struct inode *inode,
				  struct ext4_map_blocks *map)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	int i, nr_pages;
	pgoff_t index, end;

	index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	end = (map->m_lblk + map->m_len - 1) >>
		(PAGE_CACHE_SHIFT - inode->i_blkbits);

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index,
					  min(end - index + 1,
					      (pgoff_t)PAGEVEC_SIZE));
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page))
				break;

			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					set_buffer_da_mapped(bh);
					bh = bh->b_this_page;
				} while (bh != head);
			}
			index++;
		}
		pagevec_release(&pvec);
	}
}

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns the mapping if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the result buffer head is unmapped. If create == 1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been
 * allocated); in that case the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	int retval;

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated, the lookup
	 * (create == 0) returns with the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of an uninitialized extent. We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent. This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	map->m_flags &= ~EXT4_MAP_UNWRITTEN;

	/*
	 * Allocating new blocks and/or writing to an uninitialized
	 * extent will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * If the caller is from the delayed allocation writeout path,
	 * we have already reserved fs blocks for allocation; let the
	 * underlying get_block() function know, to avoid double
	 * accounting.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
	/*
	 * We need to check the extents flag again here because migrate
	 * could have changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing. Force the migrate
			 * to fail by clearing migrate flags.
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now. We don't
		 * support fallocate for non-extent files, so we can update
		 * the reserved space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

		/* If we have successfully mapped the delayed allocated blocks,
		 * set the BH_Da_Mapped bit on them. It's important to do this
		 * under the protection of i_data_sem.
		 */
		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
			set_buffers_da_mapped(inode, map);
	}

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}
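
/*
 * A hedged sketch of the calling convention documented above. The
 * surrounding error handling is abbreviated and "lblk"/"len" are
 * placeholders; when EXT4_GET_BLOCKS_CREATE is passed, a running
 * journal handle is assumed:
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = len };
 *	int ret;
 *
 *	ret = ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_CREATE);
 *	if (ret < 0)
 *		return ret;		allocation or lookup error
 *	else if (ret == 0)
 *		...			hole: nothing is mapped
 *	else
 *		...			map.m_pblk .. map.m_pblk + ret - 1
 *					now back the logical range
 */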

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}
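
/*
 * ext4_get_block() follows the get_block_t calling convention, so it
 * can be passed straight to the generic buffer-layer helpers; this is
 * exactly what ext4_write_begin() does below:
 *
 *	ret = __block_write_begin(page, pos, len, ext4_get_block);
 *
 * With create == 0 the buffer head is mapped only if the block already
 * exists; with create == 1 a missing block is allocated, either inside
 * the caller's journal handle or, for direct IO, inside the local
 * handle started by _ext4_get_block() above.
 */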

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;
	*errp = 0;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (!bh) {
		*errp = -EIO;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata. For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}
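
/*
 * Typical ext4_bread() usage (a sketch; the error convention follows
 * ext4_getblk() above, which stores a negative errno through "err"):
 *
 *	int err = 0;
 *	struct buffer_head *bh;
 *
 *	bh = ext4_bread(handle, inode, blk, 0, &err);
 *	if (!bh)
 *		return err;		lookup failed or read error
 *	...				bh->b_data is uptodate here
 *	brelse(bh);
 *
 * On success the buffer is guaranteed uptodate: the helper issues the
 * read itself and waits on the buffer if the block was not cached.
 */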

static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
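
/*
 * walk_page_buffers() applies "fn" to every buffer_head overlapping
 * [from, to) within one page. A representative call, as used by the
 * journalled write path below:
 *
 *	ret = walk_page_buffers(handle, page_buffers(page), from, to,
 *				&partial, write_end_fn);
 *
 * "partial" is set when a buffer outside [from, to) is not uptodate,
 * telling the caller it must not mark the whole page uptodate.
 */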

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction. We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write(). So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page. So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes. If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated. We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}

static int ext4_get_block_write(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to the orphan list in case
	 * we allocate blocks but the write fails for some reason.
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size. Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add the inode to the orphan list in case we crash before
		 * truncate finishes.
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}
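
/*
 * For context (a sketch; the aops tables live later in the full file):
 * ->write_begin and ->write_end are wired up in pairs, e.g. for
 * data=ordered mode:
 *
 *	static const struct address_space_operations ext4_ordered_aops = {
 *		.write_begin	= ext4_write_begin,
 *		.write_end	= ext4_ordered_write_end,
 *		...
 *	};
 *
 * The generic write path calls write_begin, copies the user data into
 * the page, then calls write_end, so the handle started above is
 * stopped by the matching write_end callback below.
 */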

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied > EXT4_I(inode)->i_disksize) {
		/* We need to mark the inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize (hint: delalloc).
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock. First, it unnecessarily
	 * makes the holding time of page lock longer. Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list. Metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
					      page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* If we have allocated more blocks than we copied,
			 * we will have blocks allocated outside
			 * inode->i_size, so truncate them.
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	} else {
		unlock_page(page);
		page_cache_release(page);
	}

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
				      page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* If we have allocated more blocks than we copied,
		 * we will have blocks allocated outside
		 * inode->i_size, so truncate them.
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	loff_t new_i_size;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (copied < len) {
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	new_i_size = pos + copied;
	if (new_i_size > inode->i_size)
		i_size_write(inode, pos+copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, new_i_size);
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	unlock_page(page);
	page_cache_release(page);
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* If we have allocated more blocks than we copied,
		 * we will have blocks allocated outside
		 * inode->i_size, so truncate them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * Reserve a single cluster located at lblock
 */
static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
{
	int retries = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int md_needed;
	int ret;

	/*
	 * Recalculate the amount of metadata blocks to reserve
	 * in order to allocate nrblocks; the worst case is
	 * one extent per block.
	 */
repeat:
	spin_lock(&ei->i_block_reservation_lock);
	md_needed = EXT4_NUM_B2C(sbi,
				 ext4_calc_metadata_amount(inode, lblock));
	trace_ext4_da_reserve_space(inode, md_needed);
	spin_unlock(&ei->i_block_reservation_lock);

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end. Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;
	/*
	 * We do still charge estimated metadata to the sb though;
	 * we cannot afford to run out of free blocks.
	 */
	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
			yield();
			goto repeat;
		}
		return -ENOSPC;
	}
	spin_lock(&ei->i_block_reservation_lock);
	ei->i_reserved_data_blocks++;
	ei->i_reserved_meta_blocks += md_needed;
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}
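
/*
 * Worked example of the accounting above, with illustrative numbers:
 * assume bigalloc with s_cluster_ratio = 16 and 4K blocks. Reserving
 * one cluster then means:
 *
 *	quota:	dquot_reserve_block(inode, EXT4_C2B(sbi, 1)) -> 16 blocks
 *	sb:	ext4_claim_free_clusters(sbi, md_needed + 1, 0)
 *	inode:	i_reserved_data_blocks += 1 (clusters, despite the name;
 *		see the bigalloc note in ext4_da_release_space() below)
 *		i_reserved_meta_blocks += md_needed
 *
 * ext4_da_release_space() below reverses exactly this bookkeeping.
 */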

static void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * If there aren't enough reserved blocks, then the
		 * counter is messed up somewhere. Since this
		 * function is called from invalidatepage, it's
		 * harmless to return without any action.
		 */
		ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks\n", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 * Note that in case of bigalloc, i_reserved_meta_blocks,
		 * i_reserved_data_blocks, etc. refer to number of clusters.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}

static void ext4_da_page_release_reservation(struct page *page,
					     unsigned long offset)
{
	int to_release = 0;
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;
	struct inode *inode = page->mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int num_clusters;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;

		if ((offset <= curr_off) && (buffer_delay(bh))) {
			to_release++;
			clear_buffer_delay(bh);
			clear_buffer_da_mapped(bh);
		}
		curr_off = next_off;
	} while ((bh = bh->b_this_page) != head);

	/* If we have released all the blocks belonging to a cluster, then we
	 * need to release the reserved space for that cluster. */
	num_clusters = EXT4_NUM_B2C(sbi, to_release);
	while (num_clusters > 0) {
		ext4_fsblk_t lblk;
		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
			((num_clusters - 1) << sbi->s_cluster_bits);
		if (sbi->s_cluster_ratio == 1 ||
		    !ext4_find_delalloc_cluster(inode, lblk, 1))
			ext4_da_release_space(inode, 1);

		num_clusters--;
	}
}
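
/*
 * Worked example of the cluster math above (numbers are illustrative):
 * with 4K blocks and s_cluster_bits = 4 (16-block clusters), a page
 * invalidation that clears "to_release" delayed buffers gives
 *
 *	num_clusters = EXT4_NUM_B2C(sbi, to_release)
 *
 * candidate clusters, and each one is released only if no other delayed
 * block still lives in it, which is what the
 * ext4_find_delalloc_cluster() check decides. With s_cluster_ratio == 1
 * (no bigalloc) every released block is its own cluster.
 */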

/*
 * Delayed allocation stuff
 */

/*
 * mpage_da_submit_io - walks through the extent of pages and tries to
 * write them with the writepage() callback.
 *
 * @mpd->inode: inode
 * @mpd->first_page: first page of the extent
 * @mpd->next_page: page after the last page of the extent
 *
 * By the time mpage_da_submit_io() is called we expect all blocks
 * to be allocated. This may be wrong if allocation failed.
 *
 * As pages are already locked by write_cache_pages(), we can't use it.
 */
static int mpage_da_submit_io(struct mpage_da_data *mpd,
			      struct ext4_map_blocks *map)
{
	struct pagevec pvec;
	unsigned long index, end;
	int ret = 0, err, nr_pages, i;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;
	loff_t size = i_size_read(inode);
	unsigned int len, block_start;
	struct buffer_head *bh, *page_bufs = NULL;
	int journal_data = ext4_should_journal_data(inode);
	sector_t pblock = 0, cur_logical = 0;
	struct ext4_io_submit io_submit;

	BUG_ON(mpd->next_page <= mpd->first_page);
	memset(&io_submit, 0, sizeof(io_submit));
	/*
	 * We need to start from the first_page to the next_page - 1
	 * to make sure we also write the mapped dirty buffer_heads.
	 * If we look at mpd->b_blocknr we would only be looking
	 * at the currently mapped buffer_heads.
	 */
	index = mpd->first_page;
	end = mpd->next_page - 1;

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			int commit_write = 0, skip_page = 0;
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index > end)
				break;

			if (index == size >> PAGE_CACHE_SHIFT)
				len = size & ~PAGE_CACHE_MASK;
			else
				len = PAGE_CACHE_SIZE;
			if (map) {
				cur_logical = index << (PAGE_CACHE_SHIFT -
							inode->i_blkbits);
				pblock = map->m_pblk + (cur_logical -
							map->m_lblk);
			}
			index++;

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));

			/*
			 * If the page does not have buffers (for
			 * whatever reason), try to create them using
			 * __block_write_begin. If this fails,
			 * skip the page and move on.
			 */
			if (!page_has_buffers(page)) {
				if (__block_write_begin(page, 0, len,
						noalloc_get_block_write)) {
				skip_page:
					unlock_page(page);
					continue;
				}
				commit_write = 1;
			}

			bh = page_bufs = page_buffers(page);
			block_start = 0;
			do {
				if (!bh)
					goto skip_page;
				if (map && (cur_logical >= map->m_lblk) &&
				    (cur_logical <= (map->m_lblk +
						     (map->m_len - 1)))) {
					if (buffer_delay(bh)) {
						clear_buffer_delay(bh);
						bh->b_blocknr = pblock;
					}
					if (buffer_da_mapped(bh))
						clear_buffer_da_mapped(bh);
					if (buffer_unwritten(bh) ||
					    buffer_mapped(bh))
						BUG_ON(bh->b_blocknr != pblock);
					if (map->m_flags & EXT4_MAP_UNINIT)
						set_buffer_uninit(bh);
					clear_buffer_unwritten(bh);
				}

				/* skip page if block allocation undone */
				if (buffer_delay(bh) || buffer_unwritten(bh))
					skip_page = 1;
				bh = bh->b_this_page;
				block_start += bh->b_size;
				cur_logical++;
				pblock++;
			} while (bh != page_bufs);

			if (skip_page)
				goto skip_page;

			if (commit_write)
				/* mark the buffer_heads as dirty & uptodate */
				block_commit_write(page, 0, len);

			clear_page_dirty_for_io(page);
			/*
			 * Delalloc doesn't support data journalling,
			 * but eventually maybe we'll lift this
			 * restriction.
			 */
			if (unlikely(journal_data && PageChecked(page)))
				err = __ext4_journalled_writepage(page, len);
			else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT))
				err = ext4_bio_write_page(&io_submit, page,
							  len, mpd->wbc);
			else if (buffer_uninit(page_bufs)) {
				ext4_set_bh_endio(page_bufs, inode);
				err = block_write_full_page_endio(page,
					noalloc_get_block_write,
					mpd->wbc, ext4_end_io_buffer_write);
			} else
				err = block_write_full_page(page,
					noalloc_get_block_write, mpd->wbc);

			if (!err)
				mpd->pages_written++;
			/*
			 * In the error case we have to continue because
			 * the remaining pages are still locked.
			 */
			if (ret == 0)
				ret = err;
		}
		pagevec_release(&pvec);
	}
	ext4_io_submit(&io_submit);
	return ret;
}

static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	index = mpd->first_page;
	end = mpd->next_page - 1;
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			if (page->index > end)
				break;
			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			block_invalidatepage(page, 0);
			ClearPageUptodate(page);
			unlock_page(page);
		}
		index = pvec.pages[nr_pages - 1]->index + 1;
		pagevec_release(&pvec);
	}
	return;
}

static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	printk(KERN_CRIT "Total free blocks count %lld\n",
	       EXT4_C2B(EXT4_SB(inode->i_sb),
			ext4_count_free_clusters(inode->i_sb)));
	printk(KERN_CRIT "Free/Dirty block details\n");
	printk(KERN_CRIT "free_blocks=%lld\n",
	       (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
		percpu_counter_sum(&sbi->s_freeclusters_counter)));
	printk(KERN_CRIT "dirty_blocks=%lld\n",
	       (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	printk(KERN_CRIT "Block reservation details\n");
	printk(KERN_CRIT "i_reserved_data_blocks=%u\n",
	       EXT4_I(inode)->i_reserved_data_blocks);
	printk(KERN_CRIT "i_reserved_meta_blocks=%u\n",
	       EXT4_I(inode)->i_reserved_meta_blocks);
	return;
}

/*
 * mpage_da_map_and_submit - go through the given space, map it
 * if necessary, and then submit it for I/O.
 *
 * @mpd - bh describing space
 *
 * The function skips space we know is already mapped to disk blocks.
 */
static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
{
	int err, blks, get_blocks_flags;
	struct ext4_map_blocks map, *mapp = NULL;
	sector_t next = mpd->b_blocknr;
	unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
	loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
	handle_t *handle = NULL;

	/*
	 * If the blocks are mapped already, or we couldn't accumulate
	 * any blocks, then proceed immediately to the submission stage.
	 */
	if ((mpd->b_size == 0) ||
	    ((mpd->b_state & (1 << BH_Mapped)) &&
	     !(mpd->b_state & (1 << BH_Delay)) &&
	     !(mpd->b_state & (1 << BH_Unwritten))))
		goto submit_io;

	handle = ext4_journal_current_handle();
	BUG_ON(!handle);

	/*
	 * Call ext4_map_blocks() to allocate any delayed allocation
	 * blocks, or to convert an uninitialized extent to be
	 * initialized (in the case where we have written into
	 * one or more preallocated blocks).
	 *
	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
	 * indicate that we are on the delayed allocation path. This
	 * affects functions in many different parts of the allocation
	 * call path. This flag exists primarily because we don't
	 * want to change *many* call functions, so ext4_map_blocks()
	 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
	 * inode's allocation semaphore is taken.
	 *
	 * If the blocks in question were delalloc blocks, set
	 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
	 * variables are updated after the blocks have been allocated.
	 */
	map.m_lblk = next;
	map.m_len = max_blocks;
	get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
	if (ext4_should_dioread_nolock(mpd->inode))
		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
	if (mpd->b_state & (1 << BH_Delay))
		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;

	blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
	if (blks < 0) {
		struct super_block *sb = mpd->inode->i_sb;

		err = blks;
		/*
		 * If get block returns EAGAIN or ENOSPC and there
		 * appear to be free blocks, we will just let
		 * mpage_da_submit_io() unlock all of the pages.
		 */
		if (err == -EAGAIN)
			goto submit_io;

		if (err == -ENOSPC && ext4_count_free_clusters(sb)) {
			mpd->retval = err;
			goto submit_io;
		}

		/*
		 * A get block failure will cause us to loop in
		 * writepages, because a_ops->writepage won't be able
		 * to make progress. The page will be redirtied by
		 * writepage and writepages will again try to write
		 * the same.
		 */
		if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
			ext4_msg(sb, KERN_CRIT,
				 "delayed block allocation failed for inode %lu "
				 "at logical offset %llu with max blocks %zd "
				 "with error %d", mpd->inode->i_ino,
				 (unsigned long long) next,
				 mpd->b_size >> mpd->inode->i_blkbits, err);
			ext4_msg(sb, KERN_CRIT,
				"This should not happen!! Data will be lost\n");
			if (err == -ENOSPC)
				ext4_print_free_blocks(mpd->inode);
		}
		/* invalidate all the pages */
		ext4_da_block_invalidatepages(mpd);

		/* Mark this page range as having been completed */
		mpd->io_done = 1;
		return;
	}
	BUG_ON(blks == 0);

	mapp = &map;
	if (map.m_flags & EXT4_MAP_NEW) {
		struct block_device *bdev = mpd->inode->i_sb->s_bdev;
		int i;

		for (i = 0; i < map.m_len; i++)
			unmap_underlying_metadata(bdev, map.m_pblk + i);

		if (ext4_should_order_data(mpd->inode)) {
			err = ext4_jbd2_file_inode(handle, mpd->inode);
			if (err) {
				/* Only if the journal is aborted */
				mpd->retval = err;
				goto submit_io;
			}
		}
	}

	/*
	 * Update on-disk size along with block allocation.
	 */
	disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
	if (disksize > i_size_read(mpd->inode))
		disksize = i_size_read(mpd->inode);
	if (disksize > EXT4_I(mpd->inode)->i_disksize) {
		ext4_update_i_disksize(mpd->inode, disksize);
		err = ext4_mark_inode_dirty(handle, mpd->inode);
		if (err)
			ext4_error(mpd->inode->i_sb,
				   "Failed to mark inode %lu dirty",
				   mpd->inode->i_ino);
	}

submit_io:
	mpage_da_submit_io(mpd, mapp);
	mpd->io_done = 1;
}

#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
		(1 << BH_Delay) | (1 << BH_Unwritten))

/*
 * mpage_add_bh_to_extent - try to add one more block to the extent of blocks
 *
 * @mpd->lbh - extent of blocks
 * @logical - logical number of the block in the file
 * @bh - bh of the block (used to access block's state)
 *
 * The function is used to collect contiguous blocks in the same state.
 */
static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
				   sector_t logical, size_t b_size,
				   unsigned long b_state)
{
	sector_t next;
	int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;

	/*
	 * XXX Don't go larger than mballoc is willing to allocate
	 * This is a stopgap solution. We eventually need to fold
	 * mpage_da_submit_io() into this function and then call
	 * ext4_map_blocks() multiple times in a loop
	 */
	if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
		goto flush_it;

	/* check if the reserved journal credits might overflow */
	if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
			/*
			 * With non-extent format we are limited by the journal
			 * credit available. Total credit needed to insert
			 * nrblocks contiguous blocks is dependent on the
			 * nrblocks. So limit nrblocks.
			 */
			goto flush_it;
		} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
			   EXT4_MAX_TRANS_DATA) {
			/*
			 * Adding the new buffer_head would make it cross the
			 * allowed limit for which we have journal credits
			 * reserved. So limit the new bh->b_size
			 */
			b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
						mpd->inode->i_blkbits;
			/* we will do mpage_da_submit_io in the next loop */
		}
	}
	/*
	 * First block in the extent
	 */
	if (mpd->b_size == 0) {
		mpd->b_blocknr = logical;
		mpd->b_size = b_size;
		mpd->b_state = b_state & BH_FLAGS;
		return;
	}

	next = mpd->b_blocknr + nrblocks;
	/*
	 * Can we merge the block to our big extent?
	 */
	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
		mpd->b_size += b_size;
		return;
	}

flush_it:
	/*
	 * We couldn't merge the block to our extent, so we
	 * need to flush the current extent and start a new one.
	 */
	mpage_da_map_and_submit(mpd);
	return;
}
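
/*
 * Sketch of the accumulation contract (logical block numbers are
 * illustrative): the writepages loop feeds consecutive dirty blocks
 * into mpage_add_bh_to_extent(), which grows mpd->b_size while the
 * state bits keep matching:
 *
 *	add(logical=100, Delay)	  starts the extent  [100, 1 block]
 *	add(logical=101, Delay)	  merges	     [100, 2 blocks]
 *	add(logical=102, Mapped)  state differs	     flush + restart
 *
 * The flush path is mpage_da_map_and_submit() above.
 */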

static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}

/*
 * This function grabs code from the very beginning of
 * ext4_map_blocks, but assumes that the caller is from the delayed
 * write path. This function looks up the requested blocks and sets the
 * buffer delay bit under the protection of i_data_sem.
 */
static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
			      struct ext4_map_blocks *map,
			      struct buffer_head *bh)
{
	int retval;
	sector_t invalid_block = ~((sector_t) 0xffff);

	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
		invalid_block = ~0;

	map->m_flags = 0;
	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
	else
		retval = ext4_ind_map_blocks(NULL, inode, map, 0);

	if (retval == 0) {
		/*
		 * XXX: __block_prepare_write() unmaps passed block,
		 * is it OK?
		 */
		/* If the block was allocated from a previously allocated
		 * cluster, then we don't need to reserve it again. */
		if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
			retval = ext4_da_reserve_space(inode, iblock);
			if (retval)
				/* not enough space to reserve */
				goto out_unlock;
		}

		/* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
		 * and it should not appear on the bh->b_state.
		 */
		map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;

		map_bh(bh, inode->i_sb, invalid_block);
		set_buffer_new(bh);
		set_buffer_delay(bh);
	}

out_unlock:
	up_read((&EXT4_I(inode)->i_data_sem));

	return retval;
}

/*
 * This is a special get_block_t callback which is used by
 * ext4_da_write_begin(). It will either return a mapped block or
 * reserve space for a single block.
 *
 * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
 * We also have b_blocknr = -1 and b_bdev initialized properly.
 *
 * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten
 * set. We also have b_blocknr = the physical block mapping the unwritten
 * extent and b_bdev initialized properly.
 */
static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
				  struct buffer_head *bh, int create)
{
	struct ext4_map_blocks map;
	int ret = 0;

	BUG_ON(create == 0);
	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);

	map.m_lblk = iblock;
	map.m_len = 1;

	/*
	 * First, we need to know whether the block is allocated already;
	 * preallocated blocks are unmapped but should be treated
	 * the same as allocated blocks.
	 */
	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
	if (ret <= 0)
		return ret;

	map_bh(bh, inode->i_sb, map.m_pblk);
	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;

	if (buffer_unwritten(bh)) {
		/* A delayed write to an unwritten bh should be marked
		 * new and mapped. Mapped ensures that we don't do
		 * get_block multiple times when we write to the same
		 * offset, and new ensures that we do a proper zero out
		 * for a partial write.
		 */
		set_buffer_new(bh);
		set_buffer_mapped(bh);
	}
	return 0;
}
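
/*
 * Summary of the buffer_head state produced above, per the comment
 * before ext4_da_get_block_prep() (a sketch, not exhaustive):
 *
 *	delayed block:	 BH_New | BH_Mapped | BH_Delay,
 *			 b_blocknr = invalid (placeholder) block
 *	unwritten block: BH_New | BH_Mapped | BH_Unwritten,
 *			 b_blocknr = physical block of the extent
 *	mapped block:	 BH_Mapped, b_blocknr = physical block
 */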

/*
 * This function is used as a standard get_block_t callback function
 * when there is no desire to allocate any blocks. It is used as a
 * callback function for block_write_begin() and block_write_full_page().
 * These functions should only try to map a single block at a time.
 *
 * Since this function doesn't do block allocations even if the caller
 * requests it by passing in create=1, it is critically important that
 * any caller checks to make sure that any buffer heads returned by
 * this function are either all already mapped or marked for
 * delayed allocation before calling block_write_full_page(). Otherwise,
 * b_blocknr could be left uninitialized, and the page write functions will
 * be taken by surprise.
 */
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
	return _ext4_get_block(inode, iblock, bh_result, 0);
}
1797
1798 static int bget_one(handle_t *handle, struct buffer_head *bh)
1799 {
1800 get_bh(bh);
1801 return 0;
1802 }
1803
1804 static int bput_one(handle_t *handle, struct buffer_head *bh)
1805 {
1806 put_bh(bh);
1807 return 0;
1808 }
1809
1810 static int __ext4_journalled_writepage(struct page *page,
1811 unsigned int len)
1812 {
1813 struct address_space *mapping = page->mapping;
1814 struct inode *inode = mapping->host;
1815 struct buffer_head *page_bufs;
1816 handle_t *handle = NULL;
1817 int ret = 0;
1818 int err;
1819
1820 ClearPageChecked(page);
1821 page_bufs = page_buffers(page);
1822 BUG_ON(!page_bufs);
1823 walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
1824 /* As soon as we unlock the page, it can go away, but we have
1825 * references to the buffers, so we are safe */
1826 unlock_page(page);
1827
1828 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1829 if (IS_ERR(handle)) {
1830 ret = PTR_ERR(handle);
1831 goto out;
1832 }
1833
1834 BUG_ON(!ext4_handle_valid(handle));
1835
1836 ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
1837 do_journal_get_write_access);
1838
1839 err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
1840 write_end_fn);
1841 if (ret == 0)
1842 ret = err;
1843 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1844 err = ext4_journal_stop(handle);
1845 if (!ret)
1846 ret = err;
1847
1848 walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
1849 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1850 out:
1851 return ret;
1852 }
1853
1854 static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
1855 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
1856
1857 /*
1858 * Note that we don't need to start a transaction unless we're journaling data
1859 * because we should have holes filled from ext4_page_mkwrite(). We don't even
1860 * need to add the inode to the transaction's list in ordered mode because if
1861 * we are writing back data added by write(), the inode is already there and if
1862 * we are writing back data modified via mmap(), no one guarantees in which
1863 * transaction the data will hit the disk. In case we are journaling data, we
1864 * cannot start transaction directly because transaction start ranks above page
1865 * lock so we have to do some magic.
1866 *
1867 * This function can get called via...
1868 * - ext4_da_writepages after taking page lock (have journal handle)
1869 * - journal_submit_inode_data_buffers (no journal handle)
1870 * - shrink_page_list via pdflush (no journal handle)
1871 * - grab_page_cache when doing write_begin (have journal handle)
1872 *
1873 * We don't do any block allocation in this function. If we have a page with
1874 * multiple blocks we need to write those buffer_heads that are mapped. This
1875 * is important for mmap-based writes. So if, with a 1K block size, we do
1876 * truncate(f, 1024);
1877 * a = mmap(f, 0, 4096);
1878 * a[0] = 'a';
1879 * truncate(f, 4096);
1880 * then the first buffer_head in the page is mapped via the page_mkwrite
1881 * callback, but the other buffer_heads are unmapped yet dirty (dirtied via
1882 * do_wp_page). So writepage should write the first block. If we modify
1883 * the mmap area beyond 1024 we will again get a page fault and the
1884 * page_mkwrite callback will do the block allocation and mark the
1885 * buffer_heads mapped.
1886 *
1887 * We redirty the page if it has any buffer_heads that are either delayed
1888 * or unwritten.
1889 *
1890 * We can get recursively called as shown below.
1891 *
1892 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1893 * ext4_writepage()
1894 *
1895 * But since we don't do any block allocation we should not deadlock.
1896 * The page also has the dirty flag cleared, so we don't recursively take the page lock.
1897 */
1898 static int ext4_writepage(struct page *page,
1899 struct writeback_control *wbc)
1900 {
1901 int ret = 0, commit_write = 0;
1902 loff_t size;
1903 unsigned int len;
1904 struct buffer_head *page_bufs = NULL;
1905 struct inode *inode = page->mapping->host;
1906
1907 trace_ext4_writepage(page);
1908 size = i_size_read(inode);
1909 if (page->index == size >> PAGE_CACHE_SHIFT)
1910 len = size & ~PAGE_CACHE_MASK;
1911 else
1912 len = PAGE_CACHE_SIZE;
1913
1914 /*
1915 * If the page does not have buffers (for whatever reason),
1916 * try to create them using __block_write_begin. If this
1917 * fails, redirty the page and move on.
1918 */
1919 if (!page_has_buffers(page)) {
1920 if (__block_write_begin(page, 0, len,
1921 noalloc_get_block_write)) {
1922 redirty_page:
1923 redirty_page_for_writepage(wbc, page);
1924 unlock_page(page);
1925 return 0;
1926 }
1927 commit_write = 1;
1928 }
1929 page_bufs = page_buffers(page);
1930 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
1931 ext4_bh_delay_or_unwritten)) {
1932 /*
1933 * We don't want to do block allocation, so redirty
1934 * the page and return. We may reach here when we do
1935 * a journal commit via journal_submit_inode_data_buffers.
1936 * We can also reach here via shrink_page_list but it
1937 * should never be for direct reclaim so warn if that
1938 * happens
1939 */
1940 WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
1941 PF_MEMALLOC);
1942 goto redirty_page;
1943 }
1944 if (commit_write)
1945 /* now mark the buffer_heads as dirty and uptodate */
1946 block_commit_write(page, 0, len);
1947
1948 if (PageChecked(page) && ext4_should_journal_data(inode))
1949 /*
1950 * It's mmapped pagecache. Add buffers and journal it. There
1951 * doesn't seem much point in redirtying the page here.
1952 */
1953 return __ext4_journalled_writepage(page, len);
1954
1955 if (buffer_uninit(page_bufs)) {
1956 ext4_set_bh_endio(page_bufs, inode);
1957 ret = block_write_full_page_endio(page, noalloc_get_block_write,
1958 wbc, ext4_end_io_buffer_write);
1959 } else
1960 ret = block_write_full_page(page, noalloc_get_block_write,
1961 wbc);
1962
1963 return ret;
1964 }
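The truncate/mmap sequence in the comment before ext4_writepage() can be reproduced from user space. This is a minimal sketch assuming a filesystem with 1K blocks and 4K pages; the file name is arbitrary, and whether the second store faults again depends on how the fault path mapped the page, as that comment describes.

/* Runnable user-space reproduction of the scenario above. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT | O_TRUNC, 0644);
	char *a;

	if (fd < 0) { perror("open"); return 1; }
	if (ftruncate(fd, 1024)) { perror("ftruncate"); return 1; }

	a = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (a == MAP_FAILED) { perror("mmap"); return 1; }

	a[0] = 'a';		/* page_mkwrite maps the first 1K block */
	if (ftruncate(fd, 4096)) { perror("ftruncate"); return 1; }
	a[2048] = 'b';		/* store beyond the old 1024-byte EOF */

	munmap(a, 4096);
	close(fd);
	return 0;
}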
1965
1966 /*
1967 * This is called via ext4_da_writepages() to
1968 * calculate the total number of credits to reserve to fit
1969 * a single extent allocation into a single transaction.
1970 * ext4_da_writepages() will loop, calling this before
1971 * the block allocation.
1972 */
1973
1974 static int ext4_da_writepages_trans_blocks(struct inode *inode)
1975 {
1976 int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
1977
1978 /*
1979 * With non-extent format the journal credit needed to
1980 * insert nrblocks contiguous block is dependent on
1981 * number of contiguous block. So we will limit
1982 * number of contiguous block to a sane value
1983 */
1984 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
1985 (max_blocks > EXT4_MAX_TRANS_DATA))
1986 max_blocks = EXT4_MAX_TRANS_DATA;
1987
1988 return ext4_chunk_trans_blocks(inode, max_blocks);
1989 }
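A hedged illustration of the clamp above: for indirect-mapped (non-extent) files the chunk handed to one transaction is capped, since the journal credits grow with the number of contiguous blocks. EXT4_MAX_TRANS_DATA is assumed to be 64 here, matching ext4_jbd2.h of this era, and the helper name is made up for the sketch.

/* Illustrative clamp only; not the kernel's credit calculation. */
#include <stdio.h>

#define MAX_TRANS_DATA 64	/* assumed EXT4_MAX_TRANS_DATA */

static int clamp_chunk(int reserved_blocks, int extent_mapped)
{
	if (!extent_mapped && reserved_blocks > MAX_TRANS_DATA)
		return MAX_TRANS_DATA;
	return reserved_blocks;
}

int main(void)
{
	printf("indirect, 1000 reserved -> chunk of %d blocks\n",
	       clamp_chunk(1000, 0));
	printf("extents,  1000 reserved -> chunk of %d blocks\n",
	       clamp_chunk(1000, 1));
	return 0;
}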
1990
1991 /*
1992 * write_cache_pages_da - walk the list of dirty pages of the given
1993 * address space and accumulate pages that need writing, and call
1994 * mpage_da_map_and_submit to map a single contiguous memory region
1995 * and then write them.
1996 */
1997 static int write_cache_pages_da(struct address_space *mapping,
1998 struct writeback_control *wbc,
1999 struct mpage_da_data *mpd,
2000 pgoff_t *done_index)
2001 {
2002 struct buffer_head *bh, *head;
2003 struct inode *inode = mapping->host;
2004 struct pagevec pvec;
2005 unsigned int nr_pages;
2006 sector_t logical;
2007 pgoff_t index, end;
2008 long nr_to_write = wbc->nr_to_write;
2009 int i, tag, ret = 0;
2010
2011 memset(mpd, 0, sizeof(struct mpage_da_data));
2012 mpd->wbc = wbc;
2013 mpd->inode = inode;
2014 pagevec_init(&pvec, 0);
2015 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2016 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2017
2018 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2019 tag = PAGECACHE_TAG_TOWRITE;
2020 else
2021 tag = PAGECACHE_TAG_DIRTY;
2022
2023 *done_index = index;
2024 while (index <= end) {
2025 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2026 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2027 if (nr_pages == 0)
2028 return 0;
2029
2030 for (i = 0; i < nr_pages; i++) {
2031 struct page *page = pvec.pages[i];
2032
2033 /*
2034 * At this point, the page may be truncated or
2035 * invalidated (changing page->mapping to NULL), or
2036 * even swizzled back from swapper_space to tmpfs file
2037 * mapping. However, page->index will not change
2038 * because we have a reference on the page.
2039 */
2040 if (page->index > end)
2041 goto out;
2042
2043 *done_index = page->index + 1;
2044
2045 /*
2046 * If we can't merge this page, and we have
2047 * accumulated a contiguous region, write it
2048 */
2049 if ((mpd->next_page != page->index) &&
2050 (mpd->next_page != mpd->first_page)) {
2051 mpage_da_map_and_submit(mpd);
2052 goto ret_extent_tail;
2053 }
2054
2055 lock_page(page);
2056
2057 /*
2058 * If the page is no longer dirty, or its
2059 * mapping no longer corresponds to inode we
2060 * are writing (which means it has been
2061 * truncated or invalidated), or the page is
2062 * already under writeback and we are not
2063 * doing a data integrity writeback, skip the page
2064 */
2065 if (!PageDirty(page) ||
2066 (PageWriteback(page) &&
2067 (wbc->sync_mode == WB_SYNC_NONE)) ||
2068 unlikely(page->mapping != mapping)) {
2069 unlock_page(page);
2070 continue;
2071 }
2072
2073 wait_on_page_writeback(page);
2074 BUG_ON(PageWriteback(page));
2075
2076 if (mpd->next_page != page->index)
2077 mpd->first_page = page->index;
2078 mpd->next_page = page->index + 1;
2079 logical = (sector_t) page->index <<
2080 (PAGE_CACHE_SHIFT - inode->i_blkbits);
2081
2082 if (!page_has_buffers(page)) {
2083 mpage_add_bh_to_extent(mpd, logical,
2084 PAGE_CACHE_SIZE,
2085 (1 << BH_Dirty) | (1 << BH_Uptodate));
2086 if (mpd->io_done)
2087 goto ret_extent_tail;
2088 } else {
2089 /*
2090 * Page with regular buffer heads,
2091 * just add all dirty ones
2092 */
2093 head = page_buffers(page);
2094 bh = head;
2095 do {
2096 BUG_ON(buffer_locked(bh));
2097 /*
2098 * We need to try to allocate
2099 * unmapped blocks in the same page.
2100 * Otherwise we won't make progress
2101 * with the page in ext4_writepage
2102 */
2103 if (ext4_bh_delay_or_unwritten(NULL, bh)) {
2104 mpage_add_bh_to_extent(mpd, logical,
2105 bh->b_size,
2106 bh->b_state);
2107 if (mpd->io_done)
2108 goto ret_extent_tail;
2109 } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2110 /*
2111 * mapped dirty buffer. We need
2112 * to update the b_state
2113 * because we look at b_state
2114 * in mpage_da_map_blocks. We
2115 * don't update b_size because
2116 * if we find an unmapped
2117 * buffer_head later we need to
2118 * use the b_state flag of that
2119 * buffer_head.
2120 */
2121 if (mpd->b_size == 0)
2122 mpd->b_state = bh->b_state & BH_FLAGS;
2123 }
2124 logical++;
2125 } while ((bh = bh->b_this_page) != head);
2126 }
2127
2128 if (nr_to_write > 0) {
2129 nr_to_write--;
2130 if (nr_to_write == 0 &&
2131 wbc->sync_mode == WB_SYNC_NONE)
2132 /*
2133 * We stop writing back only if we are
2134 * not doing integrity sync. In case of
2135 * integrity sync we have to keep going
2136 * because someone may be concurrently
2137 * dirtying pages, and we might have
2138 * synced a lot of newly appeared dirty
2139 * pages, but have not synced all of the
2140 * old dirty pages.
2141 */
2142 goto out;
2143 }
2144 }
2145 pagevec_release(&pvec);
2146 cond_resched();
2147 }
2148 return 0;
2149 ret_extent_tail:
2150 ret = MPAGE_DA_EXTENT_TAIL;
2151 out:
2152 pagevec_release(&pvec);
2153 cond_resched();
2154 return ret;
2155 }
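Stripped of the kernel machinery, the loop above implements a simple run-length accumulation over dirty page indices: extend the current extent while pages are contiguous, and flush it at the first gap. A self-contained user-space model:

/* Accumulate contiguous "dirty pages" and submit at each gap. */
#include <stdio.h>

static void submit(unsigned long first, unsigned long next)
{
	if (next != first)
		printf("submit pages [%lu, %lu)\n", first, next);
}

int main(void)
{
	unsigned long dirty[] = { 3, 4, 5, 9, 10, 17 };
	unsigned long first = 0, next = 0;
	unsigned int i;

	for (i = 0; i < sizeof(dirty) / sizeof(dirty[0]); i++) {
		if (dirty[i] != next) {	/* can't merge: flush what we have */
			submit(first, next);
			first = dirty[i];
		}
		next = dirty[i] + 1;
	}
	submit(first, next);		/* trailing extent */
	return 0;
}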
2156
2157
2158 static int ext4_da_writepages(struct address_space *mapping,
2159 struct writeback_control *wbc)
2160 {
2161 pgoff_t index;
2162 int range_whole = 0;
2163 handle_t *handle = NULL;
2164 struct mpage_da_data mpd;
2165 struct inode *inode = mapping->host;
2166 int pages_written = 0;
2167 unsigned int max_pages;
2168 int range_cyclic, cycled = 1, io_done = 0;
2169 int needed_blocks, ret = 0;
2170 long desired_nr_to_write, nr_to_writebump = 0;
2171 loff_t range_start = wbc->range_start;
2172 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2173 pgoff_t done_index = 0;
2174 pgoff_t end;
2175 struct blk_plug plug;
2176
2177 trace_ext4_da_writepages(inode, wbc);
2178
2179 /*
2180 * No pages to write? This is mainly a kludge to avoid starting
2181 * a transaction for special inodes like the journal inode on last iput(),
2182 * because that could violate lock ordering on umount
2183 */
2184 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2185 return 0;
2186
2187 /*
2188 * If the filesystem has aborted, it is read-only, so return
2189 * right away instead of dumping stack traces later on that
2190 * will obscure the real source of the problem. We test
2191 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2192 * the latter could be true if the filesystem is mounted
2193 * read-only, and in that case, ext4_da_writepages should
2194 * *never* be called, so if that ever happens, we would want
2195 * the stack trace.
2196 */
2197 if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
2198 return -EROFS;
2199
2200 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2201 range_whole = 1;
2202
2203 range_cyclic = wbc->range_cyclic;
2204 if (wbc->range_cyclic) {
2205 index = mapping->writeback_index;
2206 if (index)
2207 cycled = 0;
2208 wbc->range_start = index << PAGE_CACHE_SHIFT;
2209 wbc->range_end = LLONG_MAX;
2210 wbc->range_cyclic = 0;
2211 end = -1;
2212 } else {
2213 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2214 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2215 }
2216
2217 /*
2218 * This works around two forms of stupidity. The first is in
2219 * the writeback code, which caps the maximum number of pages
2220 * written to be 1024 pages. This is wrong on multiple
2221 * levels; different architectues have a different page size,
2222 * which changes the maximum amount of data which gets
2223 * written. Secondly, 4 megabytes is way too small. XFS
2224 * forces this value to be 16 megabytes by multiplying
2225 * nr_to_write parameter by four, and then relies on its
2226 * allocator to allocate larger extents to make them
2227 * contiguous. Unfortunately this brings us to the second
2228 * stupidity, which is that ext4's mballoc code only allocates
2229 * at most 2048 blocks. So we force contiguous writes up to
2230 * the number of dirty blocks in the inode, or
2231 * sbi->s_max_writeback_mb_bump, whichever is smaller.
2232 */
2233 max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
2234 if (!range_cyclic && range_whole) {
2235 if (wbc->nr_to_write == LONG_MAX)
2236 desired_nr_to_write = wbc->nr_to_write;
2237 else
2238 desired_nr_to_write = wbc->nr_to_write * 8;
2239 } else
2240 desired_nr_to_write = ext4_num_dirty_pages(inode, index,
2241 max_pages);
2242 if (desired_nr_to_write > max_pages)
2243 desired_nr_to_write = max_pages;
2244
2245 if (wbc->nr_to_write < desired_nr_to_write) {
2246 nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
2247 wbc->nr_to_write = desired_nr_to_write;
2248 }
2249
2250 retry:
2251 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2252 tag_pages_for_writeback(mapping, index, end);
2253
2254 blk_start_plug(&plug);
2255 while (!ret && wbc->nr_to_write > 0) {
2256
2257 /*
2258 * We insert one extent at a time, so we need the
2259 * credits for a single extent allocation.
2260 * Journalled mode is currently not supported
2261 * by delalloc.
2262 */
2263 BUG_ON(ext4_should_journal_data(inode));
2264 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2265
2266 /* start a new transaction*/
2267 handle = ext4_journal_start(inode, needed_blocks);
2268 if (IS_ERR(handle)) {
2269 ret = PTR_ERR(handle);
2270 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2271 "%ld pages, ino %lu; err %d", __func__,
2272 wbc->nr_to_write, inode->i_ino, ret);
2273 goto out_writepages;
2274 }
2275
2276 /*
2277 * Now call write_cache_pages_da() to find the next
2278 * contiguous region of logical blocks that need
2279 * blocks to be allocated by ext4 and submit them.
2280 */
2281 ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
2282 /*
2283 * If we have a contiguous extent of pages and we
2284 * haven't done the I/O yet, map the blocks and submit
2285 * them for I/O.
2286 */
2287 if (!mpd.io_done && mpd.next_page != mpd.first_page) {
2288 mpage_da_map_and_submit(&mpd);
2289 ret = MPAGE_DA_EXTENT_TAIL;
2290 }
2291 trace_ext4_da_write_pages(inode, &mpd);
2292 wbc->nr_to_write -= mpd.pages_written;
2293
2294 ext4_journal_stop(handle);
2295
2296 if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
2297 /* commit the transaction which would
2298 * free blocks released in the transaction
2299 * and try again
2300 */
2301 jbd2_journal_force_commit_nested(sbi->s_journal);
2302 ret = 0;
2303 } else if (ret == MPAGE_DA_EXTENT_TAIL) {
2304 /*
2305 * Got one extent; now try with the rest of the pages.
2306 * If mpd.retval is set to -EIO, the journal is aborted,
2307 * so we don't need to write any more.
2308 */
2309 pages_written += mpd.pages_written;
2310 ret = mpd.retval;
2311 io_done = 1;
2312 } else if (wbc->nr_to_write)
2313 /*
2314 * There is no more writeout needed,
2315 * or we requested a non-blocking writeout
2316 * and found the device congested.
2317 */
2318 break;
2319 }
2320 blk_finish_plug(&plug);
2321 if (!io_done && !cycled) {
2322 cycled = 1;
2323 index = 0;
2324 wbc->range_start = index << PAGE_CACHE_SHIFT;
2325 wbc->range_end = mapping->writeback_index - 1;
2326 goto retry;
2327 }
2328
2329 /* Update index */
2330 wbc->range_cyclic = range_cyclic;
2331 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2332 /*
2333 * set the writeback_index so that range_cyclic
2334 * mode will write it back later
2335 */
2336 mapping->writeback_index = done_index;
2337
2338 out_writepages:
2339 wbc->nr_to_write -= nr_to_writebump;
2340 wbc->range_start = range_start;
2341 trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
2342 return ret;
2343 }
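The nr_to_write bump described in the "two forms of stupidity" comment is plain arithmetic. The sketch below assumes 4K pages and an s_max_writeback_mb_bump of 128 (the default in this era's super.c); both values are assumptions for illustration.

/* Worked example of the nr_to_write bump computed above. */
#include <stdio.h>

int main(void)
{
	unsigned int mb_bump = 128;		/* assumed default */
	unsigned int page_shift = 12;		/* 4K pages */
	unsigned long max_pages = (unsigned long)mb_bump << (20 - page_shift);
	long nr_to_write = 1024;		/* typical writeback cap */
	long desired = nr_to_write * 8;		/* whole-file, non-cyclic case */

	if (desired > (long)max_pages)
		desired = max_pages;
	printf("max_pages=%lu desired_nr_to_write=%ld\n", max_pages, desired);
	return 0;
}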
2344
2345 #define FALL_BACK_TO_NONDELALLOC 1
2346 static int ext4_nonda_switch(struct super_block *sb)
2347 {
2348 s64 free_blocks, dirty_blocks;
2349 struct ext4_sb_info *sbi = EXT4_SB(sb);
2350
2351 /*
2352 * Switch to non-delalloc mode if we are running low
2353 * on free blocks. The free block accounting via percpu
2354 * counters can get slightly wrong with percpu_counter_batch getting
2355 * accumulated on each CPU without updating the global counters.
2356 * Delalloc needs accurate free block accounting, so switch
2357 * to non-delalloc when we are near the error range.
2358 */
2359 free_blocks = EXT4_C2B(sbi,
2360 percpu_counter_read_positive(&sbi->s_freeclusters_counter));
2361 dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2362 if (2 * free_blocks < 3 * dirty_blocks ||
2363 free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
2364 /*
2365 * The free block count is less than 150% of the dirty blocks,
2366 * or the free block count is below the watermark.
2367 */
2368 return 1;
2369 }
2370 /*
2371 * Even if we don't switch but are nearing capacity,
2372 * start pushing delalloc when 1/2 of free blocks are dirty.
2373 */
2374 if (free_blocks < 2 * dirty_blocks)
2375 writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
2376
2377 return 0;
2378 }
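The switch condition in ext4_nonda_switch() can be restated directly: fall back when free < 1.5 * dirty (i.e. 2*free < 3*dirty), or when free drops under dirty plus the watermark. A user-space rehearsal with made-up numbers (the watermark value is an assumption, not EXT4_FREECLUSTERS_WATERMARK):

/* The delalloc fallback condition, restated in user space. */
#include <stdio.h>

static int nonda_switch(long long free_blocks, long long dirty_blocks,
			long long watermark)
{
	return 2 * free_blocks < 3 * dirty_blocks ||
	       free_blocks < dirty_blocks + watermark;
}

int main(void)
{
	printf("%d\n", nonda_switch(1000, 700, 100));	/* 1: 2000 < 2100 */
	printf("%d\n", nonda_switch(1000, 600, 100));	/* 0: plenty free */
	printf("%d\n", nonda_switch(650, 400, 300));	/* 1: under watermark */
	return 0;
}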
2379
2380 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2381 loff_t pos, unsigned len, unsigned flags,
2382 struct page **pagep, void **fsdata)
2383 {
2384 int ret, retries = 0;
2385 struct page *page;
2386 pgoff_t index;
2387 struct inode *inode = mapping->host;
2388 handle_t *handle;
2389 loff_t page_len;
2390
2391 index = pos >> PAGE_CACHE_SHIFT;
2392
2393 if (ext4_nonda_switch(inode->i_sb)) {
2394 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2395 return ext4_write_begin(file, mapping, pos,
2396 len, flags, pagep, fsdata);
2397 }
2398 *fsdata = (void *)0;
2399 trace_ext4_da_write_begin(inode, pos, len, flags);
2400 retry:
2401 /*
2402 * With delayed allocation, we don't log the i_disksize update
2403 * if there is delayed block allocation. But we still need
2404 * to journal the i_disksize update for writes to the end
2405 * of a file that hit an already-mapped buffer.
2406 */
2407 handle = ext4_journal_start(inode, 1);
2408 if (IS_ERR(handle)) {
2409 ret = PTR_ERR(handle);
2410 goto out;
2411 }
2412 /* We cannot recurse into the filesystem as the transaction is already
2413 * started */
2414 flags |= AOP_FLAG_NOFS;
2415
2416 page = grab_cache_page_write_begin(mapping, index, flags);
2417 if (!page) {
2418 ext4_journal_stop(handle);
2419 ret = -ENOMEM;
2420 goto out;
2421 }
2422 *pagep = page;
2423
2424 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
2425 if (ret < 0) {
2426 unlock_page(page);
2427 ext4_journal_stop(handle);
2428 page_cache_release(page);
2429 /*
2430 * block_write_begin may have instantiated a few blocks
2431 * outside i_size. Trim these off again. Don't need
2432 * i_size_read because we hold i_mutex.
2433 */
2434 if (pos + len > inode->i_size)
2435 ext4_truncate_failed_write(inode);
2436 } else {
2437 page_len = pos & (PAGE_CACHE_SIZE - 1);
2438 if (page_len > 0) {
2439 ret = ext4_discard_partial_page_buffers_no_lock(handle,
2440 inode, page, pos - page_len, page_len,
2441 EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
2442 }
2443 }
2444
2445 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2446 goto retry;
2447 out:
2448 return ret;
2449 }
2450
2451 /*
2452 * Check if we should update i_disksize
2453 * when writing to the end of the file without requiring block allocation
2454 */
2455 static int ext4_da_should_update_i_disksize(struct page *page,
2456 unsigned long offset)
2457 {
2458 struct buffer_head *bh;
2459 struct inode *inode = page->mapping->host;
2460 unsigned int idx;
2461 int i;
2462
2463 bh = page_buffers(page);
2464 idx = offset >> inode->i_blkbits;
2465
2466 for (i = 0; i < idx; i++)
2467 bh = bh->b_this_page;
2468
2469 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2470 return 0;
2471 return 1;
2472 }
2473
2474 static int ext4_da_write_end(struct file *file,
2475 struct address_space *mapping,
2476 loff_t pos, unsigned len, unsigned copied,
2477 struct page *page, void *fsdata)
2478 {
2479 struct inode *inode = mapping->host;
2480 int ret = 0, ret2;
2481 handle_t *handle = ext4_journal_current_handle();
2482 loff_t new_i_size;
2483 unsigned long start, end;
2484 int write_mode = (int)(unsigned long)fsdata;
2485 loff_t page_len;
2486
2487 if (write_mode == FALL_BACK_TO_NONDELALLOC) {
2488 if (ext4_should_order_data(inode)) {
2489 return ext4_ordered_write_end(file, mapping, pos,
2490 len, copied, page, fsdata);
2491 } else if (ext4_should_writeback_data(inode)) {
2492 return ext4_writeback_write_end(file, mapping, pos,
2493 len, copied, page, fsdata);
2494 } else {
2495 BUG();
2496 }
2497 }
2498
2499 trace_ext4_da_write_end(inode, pos, len, copied);
2500 start = pos & (PAGE_CACHE_SIZE - 1);
2501 end = start + copied - 1;
2502
2503 /*
2504 * generic_write_end() will run mark_inode_dirty() if i_size
2505 * changes. So let's piggyback the i_disksize mark_inode_dirty
2506 * into that.
2507 */
2508
2509 new_i_size = pos + copied;
2510 if (new_i_size > EXT4_I(inode)->i_disksize) {
2511 if (ext4_da_should_update_i_disksize(page, end)) {
2512 down_write(&EXT4_I(inode)->i_data_sem);
2513 if (new_i_size > EXT4_I(inode)->i_disksize) {
2514 /*
2515 * Updating i_disksize when extending file
2516 * without needing block allocation
2517 */
2518 if (ext4_should_order_data(inode))
2519 ret = ext4_jbd2_file_inode(handle,
2520 inode);
2521
2522 EXT4_I(inode)->i_disksize = new_i_size;
2523 }
2524 up_write(&EXT4_I(inode)->i_data_sem);
2525 /* We need to mark inode dirty even if
2526 * new_i_size is less than inode->i_size
2527 * but greater than i_disksize (hint: delalloc).
2528 */
2529 ext4_mark_inode_dirty(handle, inode);
2530 }
2531 }
2532 ret2 = generic_write_end(file, mapping, pos, len, copied,
2533 page, fsdata);
2534
2535 page_len = PAGE_CACHE_SIZE -
2536 ((pos + copied - 1) & (PAGE_CACHE_SIZE - 1));
2537
2538 if (page_len > 0) {
2539 ret = ext4_discard_partial_page_buffers_no_lock(handle,
2540 inode, page, pos + copied - 1, page_len,
2541 EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
2542 }
2543
2544 copied = ret2;
2545 if (ret2 < 0)
2546 ret = ret2;
2547 ret2 = ext4_journal_stop(handle);
2548 if (!ret)
2549 ret = ret2;
2550
2551 return ret ? ret : copied;
2552 }
2553
2554 static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
2555 {
2556 /*
2557 * Drop reserved blocks
2558 */
2559 BUG_ON(!PageLocked(page));
2560 if (!page_has_buffers(page))
2561 goto out;
2562
2563 ext4_da_page_release_reservation(page, offset);
2564
2565 out:
2566 ext4_invalidatepage(page, offset);
2567
2568 return;
2569 }
2570
2571 /*
2572 * Force all delayed allocation blocks to be allocated for a given inode.
2573 */
2574 int ext4_alloc_da_blocks(struct inode *inode)
2575 {
2576 trace_ext4_alloc_da_blocks(inode);
2577
2578 if (!EXT4_I(inode)->i_reserved_data_blocks &&
2579 !EXT4_I(inode)->i_reserved_meta_blocks)
2580 return 0;
2581
2582 /*
2583 * We do something simple for now. The filemap_flush() will
2584 * also start triggering a write of the data blocks, which is
2585 * not strictly speaking necessary (and for users of
2586 * laptop_mode, not even desirable). However, to do otherwise
2587 * would require replicating code paths in:
2588 *
2589 * ext4_da_writepages() ->
2590 * write_cache_pages() ---> (via passed in callback function)
2591 * __mpage_da_writepage() -->
2592 * mpage_add_bh_to_extent()
2593 * mpage_da_map_blocks()
2594 *
2595 * The problem is that write_cache_pages(), located in
2596 * mm/page-writeback.c, marks pages clean in preparation for
2597 * doing I/O, which is not desirable if we're not planning on
2598 * doing I/O at all.
2599 *
2600 * We could call write_cache_pages(), and then redirty all of
2601 * the pages by calling redirty_page_for_writepage() but that
2602 * would be ugly in the extreme. So instead we would need to
2603 * replicate parts of the code in the above functions,
2604 * simplifying them because we wouldn't actually intend to
2605 * write out the pages, but rather only collect contiguous
2606 * logical block extents, call the multi-block allocator, and
2607 * then update the buffer heads with the block allocations.
2608 *
2609 * For now, though, we'll cheat by calling filemap_flush(),
2610 * which will map the blocks, and start the I/O, but not
2611 * actually wait for the I/O to complete.
2612 */
2613 return filemap_flush(inode->i_mapping);
2614 }
2615
2616 /*
2617 * bmap() is special. It gets used by applications such as lilo and by
2618 * the swapper to find the on-disk block of a specific piece of data.
2619 *
2620 * Naturally, this is dangerous if the block concerned is still in the
2621 * journal. If somebody makes a swapfile on an ext4 data-journaling
2622 * filesystem and enables swap, then they may get a nasty shock when the
2623 * data getting swapped to that swapfile suddenly gets overwritten by
2624 * the original zeros written out previously to the journal and
2625 * awaiting writeback in the kernel's buffer cache.
2626 *
2627 * So, if we see any bmap calls here on a modified, data-journaled file,
2628 * take extra steps to flush any blocks which might be in the cache.
2629 */
2630 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2631 {
2632 struct inode *inode = mapping->host;
2633 journal_t *journal;
2634 int err;
2635
2636 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
2637 test_opt(inode->i_sb, DELALLOC)) {
2638 /*
2639 * With delalloc we want to sync the file
2640 * so that we can make sure we allocate
2641 * blocks for the file.
2642 */
2643 filemap_write_and_wait(mapping);
2644 }
2645
2646 if (EXT4_JOURNAL(inode) &&
2647 ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
2648 /*
2649 * This is a REALLY heavyweight approach, but the use of
2650 * bmap on dirty files is expected to be extremely rare:
2651 * only if we run lilo or swapon on a freshly made file
2652 * do we expect this to happen.
2653 *
2654 * (bmap requires CAP_SYS_RAWIO so this does not
2655 * represent an unprivileged user DOS attack --- we'd be
2656 * in trouble if mortal users could trigger this path at
2657 * will.)
2658 *
2659 * NB. EXT4_STATE_JDATA is not set on files other than
2660 * regular files. If somebody wants to bmap a directory
2661 * or symlink and gets confused because the buffer
2662 * hasn't yet been flushed to disk, they deserve
2663 * everything they get.
2664 */
2665
2666 ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
2667 journal = EXT4_JOURNAL(inode);
2668 jbd2_journal_lock_updates(journal);
2669 err = jbd2_journal_flush(journal);
2670 jbd2_journal_unlock_updates(journal);
2671
2672 if (err)
2673 return 0;
2674 }
2675
2676 return generic_block_bmap(mapping, block, ext4_get_block);
2677 }
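The lilo/swapon use of bmap mentioned above goes through the FIBMAP ioctl, which (as the comment notes) requires CAP_SYS_RAWIO. A minimal user-space caller:

/* Query the physical block of logical block 0 via FIBMAP. */
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd, block = 0;	/* logical block in, physical block out */

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) { perror("open"); return 1; }
	if (ioctl(fd, FIBMAP, &block) < 0) { perror("FIBMAP"); return 1; }
	printf("logical block 0 -> physical block %d\n", block);
	close(fd);
	return 0;
}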
2678
2679 static int ext4_readpage(struct file *file, struct page *page)
2680 {
2681 trace_ext4_readpage(page);
2682 return mpage_readpage(page, ext4_get_block);
2683 }
2684
2685 static int
2686 ext4_readpages(struct file *file, struct address_space *mapping,
2687 struct list_head *pages, unsigned nr_pages)
2688 {
2689 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
2690 }
2691
2692 static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
2693 {
2694 struct buffer_head *head, *bh;
2695 unsigned int curr_off = 0;
2696
2697 if (!page_has_buffers(page))
2698 return;
2699 head = bh = page_buffers(page);
2700 do {
2701 if (offset <= curr_off && test_clear_buffer_uninit(bh)
2702 && bh->b_private) {
2703 ext4_free_io_end(bh->b_private);
2704 bh->b_private = NULL;
2705 bh->b_end_io = NULL;
2706 }
2707 curr_off = curr_off + bh->b_size;
2708 bh = bh->b_this_page;
2709 } while (bh != head);
2710 }
2711
2712 static void ext4_invalidatepage(struct page *page, unsigned long offset)
2713 {
2714 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2715
2716 trace_ext4_invalidatepage(page, offset);
2717
2718 /*
2719 * free any io_end structure allocated for buffers to be discarded
2720 */
2721 if (ext4_should_dioread_nolock(page->mapping->host))
2722 ext4_invalidatepage_free_endio(page, offset);
2723 /*
2724 * If it's a full truncate we just forget about the pending dirtying
2725 */
2726 if (offset == 0)
2727 ClearPageChecked(page);
2728
2729 if (journal)
2730 jbd2_journal_invalidatepage(journal, page, offset);
2731 else
2732 block_invalidatepage(page, offset);
2733 }
2734
2735 static int ext4_releasepage(struct page *page, gfp_t wait)
2736 {
2737 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2738
2739 trace_ext4_releasepage(page);
2740
2741 WARN_ON(PageChecked(page));
2742 if (!page_has_buffers(page))
2743 return 0;
2744 if (journal)
2745 return jbd2_journal_try_to_free_buffers(journal, page, wait);
2746 else
2747 return try_to_free_buffers(page);
2748 }
2749
2750 /*
2751 * ext4_get_block used when preparing for a DIO write or buffer write.
2752 * We allocate an uninitialized extent if blocks haven't been allocated.
2753 * The extent will be converted to initialized after the IO is complete.
2754 */
2755 static int ext4_get_block_write(struct inode *inode, sector_t iblock,
2756 struct buffer_head *bh_result, int create)
2757 {
2758 ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
2759 inode->i_ino, create);
2760 return _ext4_get_block(inode, iblock, bh_result,
2761 EXT4_GET_BLOCKS_IO_CREATE_EXT);
2762 }
2763
2764 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
2765 ssize_t size, void *private, int ret,
2766 bool is_async)
2767 {
2768 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2769 ext4_io_end_t *io_end = iocb->private;
2770 struct workqueue_struct *wq;
2771 unsigned long flags;
2772 struct ext4_inode_info *ei;
2773
2774 /* if not async direct IO, or a dio with a 0-byte write, just return */
2775 if (!io_end || !size)
2776 goto out;
2777
2778 ext_debug("ext4_end_io_dio(): io_end 0x%p"
2779 "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
2780 iocb->private, io_end->inode->i_ino, iocb, offset,
2781 size);
2782
2783 /* if not an aio dio with unwritten extents, just free the io_end and return */
2784 if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
2785 ext4_free_io_end(io_end);
2786 iocb->private = NULL;
2787 out:
2788 if (is_async)
2789 aio_complete(iocb, ret, 0);
2790 inode_dio_done(inode);
2791 return;
2792 }
2793
2794 io_end->offset = offset;
2795 io_end->size = size;
2796 if (is_async) {
2797 io_end->iocb = iocb;
2798 io_end->result = ret;
2799 }
2800 wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
2801
2802 /* Add the io_end to per-inode completed aio dio list*/
2803 ei = EXT4_I(io_end->inode);
2804 spin_lock_irqsave(&ei->i_completed_io_lock, flags);
2805 list_add_tail(&io_end->list, &ei->i_completed_io_list);
2806 spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
2807
2808 /* queue the work to convert unwritten extents to written */
2809 queue_work(wq, &io_end->work);
2810 iocb->private = NULL;
2811
2812 /* XXX: probably should move into the real I/O completion handler */
2813 inode_dio_done(inode);
2814 }
2815
2816 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
2817 {
2818 ext4_io_end_t *io_end = bh->b_private;
2819 struct workqueue_struct *wq;
2820 struct inode *inode;
2821 unsigned long flags;
2822
2823 if (!test_clear_buffer_uninit(bh) || !io_end)
2824 goto out;
2825
2826 if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
2827 printk("sb umounted, discard end_io request for inode %lu\n",
2828 io_end->inode->i_ino);
2829 ext4_free_io_end(io_end);
2830 goto out;
2831 }
2832
2833 /*
2834 * It may be over-defensive here to check EXT4_IO_END_UNWRITTEN now,
2835 * but being more careful is always safe for future changes.
2836 */
2837 inode = io_end->inode;
2838 ext4_set_io_unwritten_flag(inode, io_end);
2839
2840 /* Add the io_end to per-inode completed io list*/
2841 spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
2842 list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
2843 spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
2844
2845 wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
2846 /* queue the work to convert unwritten extents to written */
2847 queue_work(wq, &io_end->work);
2848 out:
2849 bh->b_private = NULL;
2850 bh->b_end_io = NULL;
2851 clear_buffer_uninit(bh);
2852 end_buffer_async_write(bh, uptodate);
2853 }
2854
2855 static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
2856 {
2857 ext4_io_end_t *io_end;
2858 struct page *page = bh->b_page;
2859 loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
2860 size_t size = bh->b_size;
2861
2862 retry:
2863 io_end = ext4_init_io_end(inode, GFP_ATOMIC);
2864 if (!io_end) {
2865 pr_warn_ratelimited("%s: allocation fail\n", __func__);
2866 schedule();
2867 goto retry;
2868 }
2869 io_end->offset = offset;
2870 io_end->size = size;
2871 /*
2872 * We need to hold a reference to the page to make sure it
2873 * doesn't get evicted before ext4_end_io_work() has a chance
2874 * to convert the extent from written to unwritten.
2875 */
2876 io_end->page = page;
2877 get_page(io_end->page);
2878
2879 bh->b_private = io_end;
2880 bh->b_end_io = ext4_end_io_buffer_write;
2881 return 0;
2882 }
2883
2884 /*
2885 * For ext4 extent files, ext4 will do direct-io writes to holes,
2886 * preallocated extents, and writes that extend the file; there is no need
2887 * to fall back to buffered IO.
2888 *
2889 * For holes, we fallocate those blocks and mark them as uninitialized.
2890 * If those blocks were preallocated, we make sure they are split, but
2891 * still keep the range to write as uninitialized.
2892 *
2893 * The unwritten extents will be converted to written when the DIO completes.
2894 * For async direct IO, since the IO may still be pending when we return, we
2895 * set up an end_io callback function, which will do the conversion
2896 * when the async direct IO completes.
2897 *
2898 * If the O_DIRECT write will extend the file then add this inode to the
2899 * orphan list. So recovery will truncate it back to the original size
2900 * if the machine crashes during the write.
2901 *
2902 */
2903 static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
2904 const struct iovec *iov, loff_t offset,
2905 unsigned long nr_segs)
2906 {
2907 struct file *file = iocb->ki_filp;
2908 struct inode *inode = file->f_mapping->host;
2909 ssize_t ret;
2910 size_t count = iov_length(iov, nr_segs);
2911
2912 loff_t final_size = offset + count;
2913 if (rw == WRITE && final_size <= inode->i_size) {
2914 /*
2915 * We could direct write to holes and fallocate.
2916 *
2917 * Allocated blocks to fill the hole are marked as uninitialized
2918 * to prevent parallel buffered reads from exposing stale data
2919 * before the DIO completes the data IO.
2920 *
2921 * As for previously fallocated extents, ext4 get_block
2922 * will simply mark the buffer mapped but still
2923 * keep the extents uninitialized.
2924 *
2925 * For the non-AIO case, we will convert those unwritten extents
2926 * to written after returning from blockdev_direct_IO.
2927 *
2928 * For async DIO, the conversion needs to be deferred until
2929 * the IO is completed. The ext4 end_io callback function
2930 * will be called to take care of the conversion work.
2931 * Here, for the async case, we allocate an io_end structure to
2932 * hook onto the iocb.
2933 */
2934 iocb->private = NULL;
2935 EXT4_I(inode)->cur_aio_dio = NULL;
2936 if (!is_sync_kiocb(iocb)) {
2937 iocb->private = ext4_init_io_end(inode, GFP_NOFS);
2938 if (!iocb->private)
2939 return -ENOMEM;
2940 /*
2941 * We save the io structure for the current async
2942 * direct IO, so that ext4_map_blocks() can later
2943 * flag the io structure if there are unwritten
2944 * extents that need to be converted when the IO
2945 * is completed.
2946 */
2947 EXT4_I(inode)->cur_aio_dio = iocb->private;
2948 }
2949
2950 ret = __blockdev_direct_IO(rw, iocb, inode,
2951 inode->i_sb->s_bdev, iov,
2952 offset, nr_segs,
2953 ext4_get_block_write,
2954 ext4_end_io_dio,
2955 NULL,
2956 DIO_LOCKING | DIO_SKIP_HOLES);
2957 if (iocb->private)
2958 EXT4_I(inode)->cur_aio_dio = NULL;
2959 /*
2960 * The io_end structure takes a reference to the inode;
2961 * that structure needs to be destroyed and the
2962 * reference to the inode needs to be dropped when the IO
2963 * is complete, even for a 0-byte write or a failure.
2964 *
2965 * In the successful AIO DIO case, the io_end structure will be
2966 * destroyed and the reference to the inode will be dropped
2967 * after the end_io callback function is called.
2968 *
2969 * In the 0-byte write or error case, since the
2970 * VFS direct IO won't invoke the end_io callback function,
2971 * we need to free the io_end structure here.
2972 */
2973 if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
2974 ext4_free_io_end(iocb->private);
2975 iocb->private = NULL;
2976 } else if (ret > 0 && ext4_test_inode_state(inode,
2977 EXT4_STATE_DIO_UNWRITTEN)) {
2978 int err;
2979 /*
2980 * For the non-AIO case, since the IO is already
2981 * completed, we can do the conversion right here.
2982 */
2983 err = ext4_convert_unwritten_extents(inode,
2984 offset, ret);
2985 if (err < 0)
2986 ret = err;
2987 ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
2988 }
2989 return ret;
2990 }
2991
2992 /* For the write-to-the-end-of-file case, we fall back to the old way */
2993 return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
2994 }
2995
2996 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
2997 const struct iovec *iov, loff_t offset,
2998 unsigned long nr_segs)
2999 {
3000 struct file *file = iocb->ki_filp;
3001 struct inode *inode = file->f_mapping->host;
3002 ssize_t ret;
3003
3004 /*
3005 * If we are doing data journalling we don't support O_DIRECT
3006 */
3007 if (ext4_should_journal_data(inode))
3008 return 0;
3009
3010 trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
3011 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3012 ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
3013 else
3014 ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3015 trace_ext4_direct_IO_exit(inode, offset,
3016 iov_length(iov, nr_segs), rw, ret);
3017 return ret;
3018 }
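A user-space counterpart to these direct IO paths: O_DIRECT typically requires the buffer, offset and length to be aligned to the device's logical block size. The 4096-byte alignment and the file name below are assumptions for illustration.

/* Minimal O_DIRECT write with an aligned buffer. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const size_t align = 4096;	/* assumed sector/block alignment */
	void *buf;
	int fd;

	if (posix_memalign(&buf, align, align)) return 1;
	memset(buf, 'x', align);

	fd = open("dio-test", O_WRONLY | O_CREAT | O_DIRECT, 0644);
	if (fd < 0) { perror("open"); return 1; }
	if (pwrite(fd, buf, align, 0) != (ssize_t)align)
		perror("pwrite");
	close(fd);
	free(buf);
	return 0;
}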
3019
3020 /*
3021 * Pages can be marked dirty completely asynchronously from ext4's journalling
3022 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
3023 * much here because ->set_page_dirty is called under VFS locks. The page is
3024 * not necessarily locked.
3025 *
3026 * We cannot just dirty the page and leave attached buffers clean, because the
3027 * buffers' dirty state is "definitive". We cannot just set the buffers dirty
3028 * or jbddirty because all the journalling code will explode.
3029 *
3030 * So what we do is to mark the page "pending dirty" and next time writepage
3031 * is called, propagate that into the buffers appropriately.
3032 */
3033 static int ext4_journalled_set_page_dirty(struct page *page)
3034 {
3035 SetPageChecked(page);
3036 return __set_page_dirty_nobuffers(page);
3037 }
3038
3039 static const struct address_space_operations ext4_ordered_aops = {
3040 .readpage = ext4_readpage,
3041 .readpages = ext4_readpages,
3042 .writepage = ext4_writepage,
3043 .write_begin = ext4_write_begin,
3044 .write_end = ext4_ordered_write_end,
3045 .bmap = ext4_bmap,
3046 .invalidatepage = ext4_invalidatepage,
3047 .releasepage = ext4_releasepage,
3048 .direct_IO = ext4_direct_IO,
3049 .migratepage = buffer_migrate_page,
3050 .is_partially_uptodate = block_is_partially_uptodate,
3051 .error_remove_page = generic_error_remove_page,
3052 };
3053
3054 static const struct address_space_operations ext4_writeback_aops = {
3055 .readpage = ext4_readpage,
3056 .readpages = ext4_readpages,
3057 .writepage = ext4_writepage,
3058 .write_begin = ext4_write_begin,
3059 .write_end = ext4_writeback_write_end,
3060 .bmap = ext4_bmap,
3061 .invalidatepage = ext4_invalidatepage,
3062 .releasepage = ext4_releasepage,
3063 .direct_IO = ext4_direct_IO,
3064 .migratepage = buffer_migrate_page,
3065 .is_partially_uptodate = block_is_partially_uptodate,
3066 .error_remove_page = generic_error_remove_page,
3067 };
3068
3069 static const struct address_space_operations ext4_journalled_aops = {
3070 .readpage = ext4_readpage,
3071 .readpages = ext4_readpages,
3072 .writepage = ext4_writepage,
3073 .write_begin = ext4_write_begin,
3074 .write_end = ext4_journalled_write_end,
3075 .set_page_dirty = ext4_journalled_set_page_dirty,
3076 .bmap = ext4_bmap,
3077 .invalidatepage = ext4_invalidatepage,
3078 .releasepage = ext4_releasepage,
3079 .direct_IO = ext4_direct_IO,
3080 .is_partially_uptodate = block_is_partially_uptodate,
3081 .error_remove_page = generic_error_remove_page,
3082 };
3083
3084 static const struct address_space_operations ext4_da_aops = {
3085 .readpage = ext4_readpage,
3086 .readpages = ext4_readpages,
3087 .writepage = ext4_writepage,
3088 .writepages = ext4_da_writepages,
3089 .write_begin = ext4_da_write_begin,
3090 .write_end = ext4_da_write_end,
3091 .bmap = ext4_bmap,
3092 .invalidatepage = ext4_da_invalidatepage,
3093 .releasepage = ext4_releasepage,
3094 .direct_IO = ext4_direct_IO,
3095 .migratepage = buffer_migrate_page,
3096 .is_partially_uptodate = block_is_partially_uptodate,
3097 .error_remove_page = generic_error_remove_page,
3098 };
3099
3100 void ext4_set_aops(struct inode *inode)
3101 {
3102 if (ext4_should_order_data(inode) &&
3103 test_opt(inode->i_sb, DELALLOC))
3104 inode->i_mapping->a_ops = &ext4_da_aops;
3105 else if (ext4_should_order_data(inode))
3106 inode->i_mapping->a_ops = &ext4_ordered_aops;
3107 else if (ext4_should_writeback_data(inode) &&
3108 test_opt(inode->i_sb, DELALLOC))
3109 inode->i_mapping->a_ops = &ext4_da_aops;
3110 else if (ext4_should_writeback_data(inode))
3111 inode->i_mapping->a_ops = &ext4_writeback_aops;
3112 else
3113 inode->i_mapping->a_ops = &ext4_journalled_aops;
3114 }
3115
3116
3117 /*
3118 * ext4_discard_partial_page_buffers()
3119 * Wrapper function for ext4_discard_partial_page_buffers_no_lock.
3120 * This function finds and locks the page containing the offset
3121 * "from" and passes it to ext4_discard_partial_page_buffers_no_lock.
3122 * Calling functions that already have the page locked should call
3123 * ext4_discard_partial_page_buffers_no_lock directly.
3124 */
3125 int ext4_discard_partial_page_buffers(handle_t *handle,
3126 struct address_space *mapping, loff_t from,
3127 loff_t length, int flags)
3128 {
3129 struct inode *inode = mapping->host;
3130 struct page *page;
3131 int err = 0;
3132
3133 page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
3134 mapping_gfp_mask(mapping) & ~__GFP_FS);
3135 if (!page)
3136 return -ENOMEM;
3137
3138 err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
3139 from, length, flags);
3140
3141 unlock_page(page);
3142 page_cache_release(page);
3143 return err;
3144 }
3145
3146 /*
3147 * ext4_discard_partial_page_buffers_no_lock()
3148 * Zeros a page range of length 'length' starting from offset 'from'.
3149 * Buffer heads that correspond to the block-aligned regions of the
3150 * zeroed range will be unmapped. Non-block-aligned regions
3151 * will have the corresponding buffer head mapped if needed so that
3152 * that region of the page can be updated with the partial zero out.
3153 *
3154 * This function assumes that the page has already been locked.
3155 * The range to be discarded must be contained within the given page.
3156 * If the specified range exceeds the end of the page it will be shortened
3157 * to the end of the page that corresponds to 'from'. This function is
3158 * appropriate for updating a page and its buffer heads to be unmapped and
3159 * zeroed for blocks that have been either released, or are going to be
3160 * released.
3161 *
3162 * handle: The journal handle
3163 * inode: The file's inode
3164 * page: A locked page that contains the offset "from"
3165 * from: The starting byte offset (from the beginning of the file)
3166 * to begin discarding
3167 * len: The number of bytes to discard
3168 * flags: Optional flags that may be used:
3169 *
3170 * EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
3171 * Only zero the regions of the page whose buffer heads
3172 * have already been unmapped. This flag is appropriate
3173 * for updating the contents of a page whose blocks may
3174 * have already been released, and we only want to zero
3175 * out the regions that correspond to those released blocks.
3176 *
3177 * Returns zero on success or negative on failure.
3178 */
3179 int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
3180 struct inode *inode, struct page *page, loff_t from,
3181 loff_t length, int flags)
3182 {
3183 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3184 unsigned int offset = from & (PAGE_CACHE_SIZE-1);
3185 unsigned int blocksize, max, pos;
3186 ext4_lblk_t iblock;
3187 struct buffer_head *bh;
3188 int err = 0;
3189
3190 blocksize = inode->i_sb->s_blocksize;
3191 max = PAGE_CACHE_SIZE - offset;
3192
3193 if (index != page->index)
3194 return -EINVAL;
3195
3196 /*
3197 * correct length if it does not fall between
3198 * 'from' and the end of the page
3199 */
3200 if (length > max || length < 0)
3201 length = max;
3202
3203 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3204
3205 if (!page_has_buffers(page)) {
3206 /*
3207 * If the range to be discarded covers a partial block
3208 * we need to get the page buffers. This is because
3209 * partial blocks cannot be released and the page needs
3210 * to be updated with the contents of the block before
3211 * we write the zeros on top of it.
3212 */
3213 if ((from & (blocksize - 1)) ||
3214 ((from + length) & (blocksize - 1))) {
3215 create_empty_buffers(page, blocksize, 0);
3216 } else {
3217 /*
3218 * If there are no partial blocks,
3219 * there is nothing to update,
3220 * so we can return now
3221 */
3222 return 0;
3223 }
3224 }
3225
3226 /* Find the buffer that contains "offset" */
3227 bh = page_buffers(page);
3228 pos = blocksize;
3229 while (offset >= pos) {
3230 bh = bh->b_this_page;
3231 iblock++;
3232 pos += blocksize;
3233 }
3234
3235 pos = offset;
3236 while (pos < offset + length) {
3237 unsigned int end_of_block, range_to_discard;
3238
3239 err = 0;
3240
3241 /* The length of space left to zero and unmap */
3242 range_to_discard = offset + length - pos;
3243
3244 /* The length of space until the end of the block */
3245 end_of_block = blocksize - (pos & (blocksize-1));
3246
3247 /*
3248 * Do not unmap or zero past end of block
3249 * for this buffer head
3250 */
3251 if (range_to_discard > end_of_block)
3252 range_to_discard = end_of_block;
3253
3254
3255 /*
3256 * Skip this buffer head if we are only zeroing unmapped
3257 * regions of the page
3258 */
3259 if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
3260 buffer_mapped(bh))
3261 goto next;
3262
3263 /* If the range is block aligned, unmap */
3264 if (range_to_discard == blocksize) {
3265 clear_buffer_dirty(bh);
3266 bh->b_bdev = NULL;
3267 clear_buffer_mapped(bh);
3268 clear_buffer_req(bh);
3269 clear_buffer_new(bh);
3270 clear_buffer_delay(bh);
3271 clear_buffer_unwritten(bh);
3272 clear_buffer_uptodate(bh);
3273 zero_user(page, pos, range_to_discard);
3274 BUFFER_TRACE(bh, "Buffer discarded");
3275 goto next;
3276 }
3277
3278 /*
3279 * If this block is not completely contained in the range
3280 * to be discarded, then it is not going to be released. Because
3281 * we need to keep this block, we need to make sure this part
3282 * of the page is uptodate before we modify it by writing
3283 * partial zeros on it.
3284 */
3285 if (!buffer_mapped(bh)) {
3286 /*
3287 * Buffer head must be mapped before we can read
3288 * from the block
3289 */
3290 BUFFER_TRACE(bh, "unmapped");
3291 ext4_get_block(inode, iblock, bh, 0);
3292 /* unmapped? It's a hole - nothing to do */
3293 if (!buffer_mapped(bh)) {
3294 BUFFER_TRACE(bh, "still unmapped");
3295 goto next;
3296 }
3297 }
3298
3299 /* Ok, it's mapped. Make sure it's up-to-date */
3300 if (PageUptodate(page))
3301 set_buffer_uptodate(bh);
3302
3303 if (!buffer_uptodate(bh)) {
3304 err = -EIO;
3305 ll_rw_block(READ, 1, &bh);
3306 wait_on_buffer(bh);
3307 /* Uhhuh. Read error. Complain and punt. */
3308 if (!buffer_uptodate(bh))
3309 goto next;
3310 }
3311
3312 if (ext4_should_journal_data(inode)) {
3313 BUFFER_TRACE(bh, "get write access");
3314 err = ext4_journal_get_write_access(handle, bh);
3315 if (err)
3316 goto next;
3317 }
3318
3319 zero_user(page, pos, range_to_discard);
3320
3321 err = 0;
3322 if (ext4_should_journal_data(inode)) {
3323 err = ext4_handle_dirty_metadata(handle, inode, bh);
3324 } else
3325 mark_buffer_dirty(bh);
3326
3327 BUFFER_TRACE(bh, "Partial buffer zeroed");
3328 next:
3329 bh = bh->b_this_page;
3330 iblock++;
3331 pos += range_to_discard;
3332 }
3333
3334 return err;
3335 }
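The per-buffer walk above reduces to block arithmetic: clamp each step to the end of the current block, unmap full blocks and zero partial ones. A user-space rehearsal, assuming 4K pages and 1K blocks with a sample range:

/* Walk the blocks inside [from, from+length) within one page. */
#include <stdio.h>

#define PAGE_SIZE 4096
#define BLOCKSIZE 1024

int main(void)
{
	unsigned int from = 1500, length = 2000;	/* sample range */
	unsigned int offset = from % PAGE_SIZE;
	unsigned int pos = offset;

	while (pos < offset + length) {
		unsigned int range = offset + length - pos;
		unsigned int end_of_block = BLOCKSIZE - (pos & (BLOCKSIZE - 1));

		if (range > end_of_block)	/* never cross a block */
			range = end_of_block;
		printf("pos=%u len=%u -> %s\n", pos, range,
		       range == BLOCKSIZE ? "unmap whole block"
					  : "zero partial block");
		pos += range;
	}
	return 0;
}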
3336
3337 /*
3338 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3339 * up to the end of the block which corresponds to `from'.
3340 * This is required during truncate. We need to physically zero the tail end
3341 * of that block so it doesn't yield old data if the file is later grown.
3342 */
3343 int ext4_block_truncate_page(handle_t *handle,
3344 struct address_space *mapping, loff_t from)
3345 {
3346 unsigned offset = from & (PAGE_CACHE_SIZE-1);
3347 unsigned length;
3348 unsigned blocksize;
3349 struct inode *inode = mapping->host;
3350
3351 blocksize = inode->i_sb->s_blocksize;
3352 length = blocksize - (offset & (blocksize - 1));
3353
3354 return ext4_block_zero_page_range(handle, mapping, from, length);
3355 }
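A worked example of the tail computation above, with an assumed 4K block size: truncating at offset 10000 leaves 1808 bytes in use in the last block, so the remaining 2288 bytes must be zeroed.

/* Tail length for ext4_block_truncate_page()'s zeroing. */
#include <stdio.h>

int main(void)
{
	unsigned int blocksize = 4096;			/* assumed */
	unsigned long long from = 10000;		/* new i_size */
	unsigned int offset = from & (blocksize - 1);	/* 10000 % 4096 = 1808 */
	unsigned int length = blocksize - offset;	/* 2288 bytes to zero */

	printf("zero %u bytes starting at offset %llu\n", length, from);
	return 0;
}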
3356
3357 /*
3358 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
3359 * starting from file offset 'from'. The range to be zeroed must
3360 * be contained within one block. If the specified range exceeds
3361 * the end of the block it will be shortened to the end of the block
3362 * that corresponds to 'from'.
3363 */
3364 int ext4_block_zero_page_range(handle_t *handle,
3365 struct address_space *mapping, loff_t from, loff_t length)
3366 {
3367 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3368 unsigned offset = from & (PAGE_CACHE_SIZE-1);
3369 unsigned blocksize, max, pos;
3370 ext4_lblk_t iblock;
3371 struct inode *inode = mapping->host;
3372 struct buffer_head *bh;
3373 struct page *page;
3374 int err = 0;
3375
3376 page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
3377 mapping_gfp_mask(mapping) & ~__GFP_FS);
3378 if (!page)
3379 return -ENOMEM;
3380
3381 blocksize = inode->i_sb->s_blocksize;
3382 max = blocksize - (offset & (blocksize - 1));
3383
3384 /*
3385 * correct length if it does not fall between
3386 * 'from' and the end of the block
3387 */
3388 if (length > max || length < 0)
3389 length = max;
3390
3391 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3392
3393 if (!page_has_buffers(page))
3394 create_empty_buffers(page, blocksize, 0);
3395
3396 /* Find the buffer that contains "offset" */
3397 bh = page_buffers(page);
3398 pos = blocksize;
3399 while (offset >= pos) {
3400 bh = bh->b_this_page;
3401 iblock++;
3402 pos += blocksize;
3403 }
3404
3405 err = 0;
3406 if (buffer_freed(bh)) {
3407 BUFFER_TRACE(bh, "freed: skip");
3408 goto unlock;
3409 }
3410
3411 if (!buffer_mapped(bh)) {
3412 BUFFER_TRACE(bh, "unmapped");
3413 ext4_get_block(inode, iblock, bh, 0);
3414 /* unmapped? It's a hole - nothing to do */
3415 if (!buffer_mapped(bh)) {
3416 BUFFER_TRACE(bh, "still unmapped");
3417 goto unlock;
3418 }
3419 }
3420
3421 /* Ok, it's mapped. Make sure it's up-to-date */
3422 if (PageUptodate(page))
3423 set_buffer_uptodate(bh);
3424
3425 if (!buffer_uptodate(bh)) {
3426 err = -EIO;
3427 ll_rw_block(READ, 1, &bh);
3428 wait_on_buffer(bh);
3429 /* Uhhuh. Read error. Complain and punt. */
3430 if (!buffer_uptodate(bh))
3431 goto unlock;
3432 }
3433
3434 if (ext4_should_journal_data(inode)) {
3435 BUFFER_TRACE(bh, "get write access");
3436 err = ext4_journal_get_write_access(handle, bh);
3437 if (err)
3438 goto unlock;
3439 }
3440
3441 zero_user(page, offset, length);
3442
3443 BUFFER_TRACE(bh, "zeroed end of block");
3444
3445 err = 0;
3446 if (ext4_should_journal_data(inode)) {
3447 err = ext4_handle_dirty_metadata(handle, inode, bh);
3448 } else
3449 mark_buffer_dirty(bh);
3450
3451 unlock:
3452 unlock_page(page);
3453 page_cache_release(page);
3454 return err;
3455 }
3456
3457 int ext4_can_truncate(struct inode *inode)
3458 {
3459 if (S_ISREG(inode->i_mode))
3460 return 1;
3461 if (S_ISDIR(inode->i_mode))
3462 return 1;
3463 if (S_ISLNK(inode->i_mode))
3464 return !ext4_inode_is_fast_symlink(inode);
3465 return 0;
3466 }
3467
3468 /*
3469 * ext4_punch_hole: punches a hole in a file by releasing the blocks
3470 * associated with the given offset and length
3471 *
3472 * @inode: File inode
3473 * @offset: The offset where the hole will begin
3474 * @len: The length of the hole
3475 *
3476 * Returns: 0 on success or negative on failure
3477 */
3478
3479 int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
3480 {
3481 struct inode *inode = file->f_path.dentry->d_inode;
3482 if (!S_ISREG(inode->i_mode))
3483 return -ENOTSUPP;
3484
3485 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
3486 /* TODO: Add support for non extent hole punching */
3487 return -ENOTSUPP;
3488 }
3489
3490 if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) {
3491 /* TODO: Add support for bigalloc file systems */
3492 return -ENOTSUPP;
3493 }
3494
3495 return ext4_ext_punch_hole(file, offset, length);
3496 }
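From user space, this path is reached via fallocate() with FALLOC_FL_PUNCH_HOLE, which must be paired with FALLOC_FL_KEEP_SIZE. A minimal caller (the file name and range are arbitrary); on non-extent or bigalloc filesystems of this era the call fails with an "operation not supported" style error, per the checks above.

/* Punch an 8K hole starting at offset 4096. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("holed-file", O_RDWR);

	if (fd < 0) { perror("open"); return 1; }
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4096, 8192) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}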
3497
3498 /*
3499 * ext4_truncate()
3500 *
3501 * We block out ext4_get_block() block instantiations across the entire
3502 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
3503 * simultaneously on behalf of the same inode.
3504 *
3505 * As we work through the truncate and commit bits of it to the journal there
3506 * is one core, guiding principle: the file's tree must always be consistent on
3507 * disk. We must be able to restart the truncate after a crash.
3508 *
3509 * The file's tree may be transiently inconsistent in memory (although it
3510 * probably isn't), but whenever we close off and commit a journal transaction,
3511 * the contents of (the filesystem + the journal) must be consistent and
3512 * restartable. It's pretty simple, really: bottom up, right to left (although
3513 * left-to-right works OK too).
3514 *
3515 * Note that at recovery time, journal replay occurs *before* the restart of
3516 * truncate against the orphan inode list.
3517 *
3518 * The committed inode has the new, desired i_size (which is the same as
3519 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
3520 * that this inode's truncate did not complete and it will again call
3521 * ext4_truncate() to have another go. So there will be instantiated blocks
3522 * to the right of the truncation point in a crashed ext4 filesystem. But
3523 * that's fine - as long as they are linked from the inode, the post-crash
3524 * ext4_truncate() run will find them and release them.
3525 */
3526 void ext4_truncate(struct inode *inode)
3527 {
3528 trace_ext4_truncate_enter(inode);
3529
3530 if (!ext4_can_truncate(inode))
3531 return;
3532
3533 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3534
3535 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
3536 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
3537
3538 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3539 ext4_ext_truncate(inode);
3540 else
3541 ext4_ind_truncate(inode);
3542
3543 trace_ext4_truncate_exit(inode);
3544 }
3545
3546 /*
3547 * ext4_get_inode_loc returns with an extra refcount against the inode's
3548 * underlying buffer_head on success. If 'in_mem' is true, we have all
3549 * data in memory that is needed to recreate the on-disk version of this
3550 * inode.
3551 */
3552 static int __ext4_get_inode_loc(struct inode *inode,
3553 struct ext4_iloc *iloc, int in_mem)
3554 {
3555 struct ext4_group_desc *gdp;
3556 struct buffer_head *bh;
3557 struct super_block *sb = inode->i_sb;
3558 ext4_fsblk_t block;
3559 int inodes_per_block, inode_offset;
3560
3561 iloc->bh = NULL;
3562 if (!ext4_valid_inum(sb, inode->i_ino))
3563 return -EIO;
3564
3565 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
3566 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
3567 if (!gdp)
3568 return -EIO;
3569
3570 /*
3571 * Figure out the offset within the block group inode table
3572 */
3573 inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
3574 inode_offset = ((inode->i_ino - 1) %
3575 EXT4_INODES_PER_GROUP(sb));
3576 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
3577 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
3578
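/*
 * A worked example under assumed parameters (4k blocks, 256-byte
 * inodes, 8192 inodes per group, hence 16 inodes per block): for
 * ino 8195, block_group = 8194 / 8192 = 1 and inode_offset =
 * 8194 % 8192 = 2, so the inode lives in the first block of group
 * 1's inode table, at byte offset 2 * 256 = 512 within that block.
 */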
3579 bh = sb_getblk(sb, block);
3580 if (!bh) {
3581 EXT4_ERROR_INODE_BLOCK(inode, block,
3582 "unable to read itable block");
3583 return -EIO;
3584 }
3585 if (!buffer_uptodate(bh)) {
3586 lock_buffer(bh);
3587
3588 /*
3589 * If the buffer has the write error flag, we have failed
3590 * to write out another inode in the same block. In this
3591 * case, we don't have to read the block, since a re-read
3592 * could simply bring back the old inode data from disk.
3593 */
3594 if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
3595 set_buffer_uptodate(bh);
3596
3597 if (buffer_uptodate(bh)) {
3598 /* someone brought it uptodate while we waited */
3599 unlock_buffer(bh);
3600 goto has_buffer;
3601 }
3602
3603 /*
3604 * If we have all information of the inode in memory and this
3605 * is the only valid inode in the block, we need not read the
3606 * block.
3607 */
3608 if (in_mem) {
3609 struct buffer_head *bitmap_bh;
3610 int i, start;
3611
3612 start = inode_offset & ~(inodes_per_block - 1);
3613
3614 /* Is the inode bitmap in cache? */
3615 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
3616 if (!bitmap_bh)
3617 goto make_io;
3618
3619 /*
3620 * If the inode bitmap isn't in cache then the
3621 * optimisation may end up performing two reads instead
3622 * of one, so skip it.
3623 */
3624 if (!buffer_uptodate(bitmap_bh)) {
3625 brelse(bitmap_bh);
3626 goto make_io;
3627 }
3628 for (i = start; i < start + inodes_per_block; i++) {
3629 if (i == inode_offset)
3630 continue;
3631 if (ext4_test_bit(i, bitmap_bh->b_data))
3632 break;
3633 }
3634 brelse(bitmap_bh);
3635 if (i == start + inodes_per_block) {
3636 /* all other inodes are free, so skip I/O */
3637 memset(bh->b_data, 0, bh->b_size);
3638 set_buffer_uptodate(bh);
3639 unlock_buffer(bh);
3640 goto has_buffer;
3641 }
3642 }
3643
3644 make_io:
3645 /*
3646 * If we need to do any I/O, try to read ahead some extra
3647 * blocks from the inode table.
3648 */
3649 if (EXT4_SB(sb)->s_inode_readahead_blks) {
3650 ext4_fsblk_t b, end, table;
3651 unsigned num;
3652
3653 table = ext4_inode_table(sb, gdp);
3654 /* s_inode_readahead_blks is always a power of 2 */
3655 b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
3656 if (table > b)
3657 b = table;
3658 end = b + EXT4_SB(sb)->s_inode_readahead_blks;
3659 num = EXT4_INODES_PER_GROUP(sb);
3660 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3661 EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
3662 num -= ext4_itable_unused_count(sb, gdp);
3663 table += num / inodes_per_block;
3664 if (end > table)
3665 end = table;
3666 while (b <= end)
3667 sb_breadahead(sb, b++);
3668 }
3669
3670 /*
3671 * There are other valid inodes in the buffer, this inode
3672 * has in-inode xattrs, or we don't have this inode in memory.
3673 * Read the block from disk.
3674 */
3675 trace_ext4_load_inode(inode);
3676 get_bh(bh);
3677 bh->b_end_io = end_buffer_read_sync;
3678 submit_bh(READ | REQ_META | REQ_PRIO, bh);
3679 wait_on_buffer(bh);
3680 if (!buffer_uptodate(bh)) {
3681 EXT4_ERROR_INODE_BLOCK(inode, block,
3682 "unable to read itable block");
3683 brelse(bh);
3684 return -EIO;
3685 }
3686 }
3687 has_buffer:
3688 iloc->bh = bh;
3689 return 0;
3690 }
3691
3692 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
3693 {
3694 /* We have all inode data except xattrs in memory here. */
3695 return __ext4_get_inode_loc(inode, iloc,
3696 !ext4_test_inode_state(inode, EXT4_STATE_XATTR));
3697 }
3698
3699 void ext4_set_inode_flags(struct inode *inode)
3700 {
3701 unsigned int flags = EXT4_I(inode)->i_flags;
3702
3703 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
3704 if (flags & EXT4_SYNC_FL)
3705 inode->i_flags |= S_SYNC;
3706 if (flags & EXT4_APPEND_FL)
3707 inode->i_flags |= S_APPEND;
3708 if (flags & EXT4_IMMUTABLE_FL)
3709 inode->i_flags |= S_IMMUTABLE;
3710 if (flags & EXT4_NOATIME_FL)
3711 inode->i_flags |= S_NOATIME;
3712 if (flags & EXT4_DIRSYNC_FL)
3713 inode->i_flags |= S_DIRSYNC;
3714 }
3715
3716 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
3717 void ext4_get_inode_flags(struct ext4_inode_info *ei)
3718 {
3719 unsigned int vfs_fl;
3720 unsigned long old_fl, new_fl;
3721
3722 do {
3723 vfs_fl = ei->vfs_inode.i_flags;
3724 old_fl = ei->i_flags;
3725 new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
3726 EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
3727 EXT4_DIRSYNC_FL);
3728 if (vfs_fl & S_SYNC)
3729 new_fl |= EXT4_SYNC_FL;
3730 if (vfs_fl & S_APPEND)
3731 new_fl |= EXT4_APPEND_FL;
3732 if (vfs_fl & S_IMMUTABLE)
3733 new_fl |= EXT4_IMMUTABLE_FL;
3734 if (vfs_fl & S_NOATIME)
3735 new_fl |= EXT4_NOATIME_FL;
3736 if (vfs_fl & S_DIRSYNC)
3737 new_fl |= EXT4_DIRSYNC_FL;
3738 } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
3739 }
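
#if 0
/*
 * A minimal sketch of the lock-free pattern used above: read the
 * current value, derive the new value from it, and retry if another
 * CPU modified the word in the meantime. The helper name and its
 * arguments are hypothetical.
 */
static void set_flag_atomically(unsigned long *flags, unsigned long bit)
{
	unsigned long old_fl, new_fl;

	do {
		old_fl = *flags;
		new_fl = old_fl | bit;
	} while (cmpxchg(flags, old_fl, new_fl) != old_fl);
}
#endif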
3740
3741 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
3742 struct ext4_inode_info *ei)
3743 {
3744 blkcnt_t i_blocks;
3745 struct inode *inode = &(ei->vfs_inode);
3746 struct super_block *sb = inode->i_sb;
3747
3748 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3749 EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
3750 /* we are using combined 48 bit field */
3751 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
3752 le32_to_cpu(raw_inode->i_blocks_lo);
3753 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
3754 /* i_blocks is in units of file system blocks */
3755 return i_blocks << (inode->i_blkbits - 9);
3756 } else {
3757 return i_blocks;
3758 }
3759 } else {
3760 return le32_to_cpu(raw_inode->i_blocks_lo);
3761 }
3762 }
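
/*
 * A worked example of the decode above, with assumed raw values: for
 * i_blocks_high = 0x0001 and i_blocks_lo = 0x00000010 the combined
 * 48-bit count is (1 << 32) | 0x10 = 4294967312. Without the
 * HUGE_FILE inode flag that is a count of 512-byte sectors; with it,
 * the count is in filesystem blocks, so with 4k blocks it scales by
 * << (12 - 9) to 34359738496 sectors.
 */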
3763
3764 struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3765 {
3766 struct ext4_iloc iloc;
3767 struct ext4_inode *raw_inode;
3768 struct ext4_inode_info *ei;
3769 struct inode *inode;
3770 journal_t *journal = EXT4_SB(sb)->s_journal;
3771 long ret;
3772 int block;
3773
3774 inode = iget_locked(sb, ino);
3775 if (!inode)
3776 return ERR_PTR(-ENOMEM);
3777 if (!(inode->i_state & I_NEW))
3778 return inode;
3779
3780 ei = EXT4_I(inode);
3781 iloc.bh = NULL;
3782
3783 ret = __ext4_get_inode_loc(inode, &iloc, 0);
3784 if (ret < 0)
3785 goto bad_inode;
3786 raw_inode = ext4_raw_inode(&iloc);
3787 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
3788 inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
3789 inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
3790 if (!(test_opt(inode->i_sb, NO_UID32))) {
3791 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
3792 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
3793 }
3794 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
3795
3796 ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
3797 ei->i_dir_start_lookup = 0;
3798 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
3799 /* We now have enough fields to check if the inode was active or not.
3800 * This is needed because nfsd might try to access dead inodes
3801 * the test is the same one that e2fsck uses
3802 * NeilBrown 1999oct15
3803 */
3804 if (inode->i_nlink == 0) {
3805 if (inode->i_mode == 0 ||
3806 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
3807 /* this inode is deleted */
3808 ret = -ESTALE;
3809 goto bad_inode;
3810 }
3811 /* The only unlinked inodes we let through here have
3812 * valid i_mode and are being read by the orphan
3813 * recovery code: that's fine, we're about to complete
3814 * the process of deleting those. */
3815 }
3816 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
3817 inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
3818 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
3819 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
3820 ei->i_file_acl |=
3821 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
3822 inode->i_size = ext4_isize(raw_inode);
3823 ei->i_disksize = inode->i_size;
3824 #ifdef CONFIG_QUOTA
3825 ei->i_reserved_quota = 0;
3826 #endif
3827 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
3828 ei->i_block_group = iloc.block_group;
3829 ei->i_last_alloc_group = ~0;
3830 /*
3831 * NOTE! The in-memory inode i_data array is in little-endian order
3832 * even on big-endian machines: we do NOT byteswap the block numbers!
3833 */
3834 for (block = 0; block < EXT4_N_BLOCKS; block++)
3835 ei->i_data[block] = raw_inode->i_block[block];
3836 INIT_LIST_HEAD(&ei->i_orphan);
3837
3838 /*
3839 * Set transaction id's of transactions that have to be committed
3840 * to finish f[data]sync. We set them to currently running transaction
3841 * as we cannot be sure that the inode or some of its metadata isn't
3842 * part of the transaction - the inode could have been reclaimed and
3843 * now it is reread from disk.
3844 */
3845 if (journal) {
3846 transaction_t *transaction;
3847 tid_t tid;
3848
3849 read_lock(&journal->j_state_lock);
3850 if (journal->j_running_transaction)
3851 transaction = journal->j_running_transaction;
3852 else
3853 transaction = journal->j_committing_transaction;
3854 if (transaction)
3855 tid = transaction->t_tid;
3856 else
3857 tid = journal->j_commit_sequence;
3858 read_unlock(&journal->j_state_lock);
3859 ei->i_sync_tid = tid;
3860 ei->i_datasync_tid = tid;
3861 }
3862
3863 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3864 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
3865 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
3866 EXT4_INODE_SIZE(inode->i_sb)) {
3867 ret = -EIO;
3868 goto bad_inode;
3869 }
3870 if (ei->i_extra_isize == 0) {
3871 /* The extra space is currently unused. Use it. */
3872 ei->i_extra_isize = sizeof(struct ext4_inode) -
3873 EXT4_GOOD_OLD_INODE_SIZE;
3874 } else {
3875 __le32 *magic = (void *)raw_inode +
3876 EXT4_GOOD_OLD_INODE_SIZE +
3877 ei->i_extra_isize;
3878 if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
3879 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
3880 }
3881 } else
3882 ei->i_extra_isize = 0;
3883
3884 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
3885 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
3886 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
3887 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
3888
3889 inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
3890 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3891 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
3892 inode->i_version |=
3893 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
3894 }
3895
3896 ret = 0;
3897 if (ei->i_file_acl &&
3898 !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
3899 EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
3900 ei->i_file_acl);
3901 ret = -EIO;
3902 goto bad_inode;
3903 } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
3904 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3905 (S_ISLNK(inode->i_mode) &&
3906 !ext4_inode_is_fast_symlink(inode)))
3907 /* Validate extent which is part of inode */
3908 ret = ext4_ext_check_inode(inode);
3909 } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3910 (S_ISLNK(inode->i_mode) &&
3911 !ext4_inode_is_fast_symlink(inode))) {
3912 /* Validate block references which are part of inode */
3913 ret = ext4_ind_check_inode(inode);
3914 }
3915 if (ret)
3916 goto bad_inode;
3917
3918 if (S_ISREG(inode->i_mode)) {
3919 inode->i_op = &ext4_file_inode_operations;
3920 inode->i_fop = &ext4_file_operations;
3921 ext4_set_aops(inode);
3922 } else if (S_ISDIR(inode->i_mode)) {
3923 inode->i_op = &ext4_dir_inode_operations;
3924 inode->i_fop = &ext4_dir_operations;
3925 } else if (S_ISLNK(inode->i_mode)) {
3926 if (ext4_inode_is_fast_symlink(inode)) {
3927 inode->i_op = &ext4_fast_symlink_inode_operations;
3928 nd_terminate_link(ei->i_data, inode->i_size,
3929 sizeof(ei->i_data) - 1);
3930 } else {
3931 inode->i_op = &ext4_symlink_inode_operations;
3932 ext4_set_aops(inode);
3933 }
3934 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
3935 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
3936 inode->i_op = &ext4_special_inode_operations;
3937 if (raw_inode->i_block[0])
3938 init_special_inode(inode, inode->i_mode,
3939 old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
3940 else
3941 init_special_inode(inode, inode->i_mode,
3942 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
3943 } else {
3944 ret = -EIO;
3945 EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
3946 goto bad_inode;
3947 }
3948 brelse(iloc.bh);
3949 ext4_set_inode_flags(inode);
3950 unlock_new_inode(inode);
3951 return inode;
3952
3953 bad_inode:
3954 brelse(iloc.bh);
3955 iget_failed(inode);
3956 return ERR_PTR(ret);
3957 }
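
#if 0
/*
 * A minimal caller-side sketch, for illustration: ext4_iget() returns
 * either a usable inode or an ERR_PTR() value, so callers must check
 * with IS_ERR() rather than for NULL.
 */
static struct inode *iget_example(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = ext4_iget(sb, ino);

	if (IS_ERR(inode))
		return inode;	/* e.g. -ESTALE for a deleted inode */
	/* ... use the inode, then iput() it ... */
	return inode;
}
#endif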
3958
3959 static int ext4_inode_blocks_set(handle_t *handle,
3960 struct ext4_inode *raw_inode,
3961 struct ext4_inode_info *ei)
3962 {
3963 struct inode *inode = &(ei->vfs_inode);
3964 u64 i_blocks = inode->i_blocks;
3965 struct super_block *sb = inode->i_sb;
3966
3967 if (i_blocks <= ~0U) {
3968 /*
3969 * i_blocks can be represented in a 32 bit variable
3970 * as multiple of 512 bytes
3971 */
3972 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
3973 raw_inode->i_blocks_high = 0;
3974 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
3975 return 0;
3976 }
3977 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
3978 return -EFBIG;
3979
3980 if (i_blocks <= 0xffffffffffffULL) {
3981 /*
3982 * i_blocks can be represented in a 48 bit variable
3983 * as multiple of 512 bytes
3984 */
3985 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
3986 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
3987 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
3988 } else {
3989 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
3990 /* i_blocks is stored in file system block size units */
3991 i_blocks = i_blocks >> (inode->i_blkbits - 9);
3992 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
3993 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
3994 }
3995 return 0;
3996 }
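
/*
 * For example, under an assumed 4k block size the three cases above
 * cover: up to 2^32 - 1 sectors (~2TB) in i_blocks_lo alone; up to
 * 2^48 - 1 sectors (~128PB) split across the high and low fields;
 * and beyond that, the same 48 bits reinterpreted as filesystem
 * blocks, extending the representable size by a further factor of 8.
 */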
3997
3998 /*
3999 * Post the struct inode info into an on-disk inode location in the
4000 * buffer-cache. This gobbles the caller's reference to the
4001 * buffer_head in the inode location struct.
4002 *
4003 * The caller must have write access to iloc->bh.
4004 */
4005 static int ext4_do_update_inode(handle_t *handle,
4006 struct inode *inode,
4007 struct ext4_iloc *iloc)
4008 {
4009 struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
4010 struct ext4_inode_info *ei = EXT4_I(inode);
4011 struct buffer_head *bh = iloc->bh;
4012 int err = 0, rc, block;
4013
4014 /* For fields not tracked in the in-memory inode,
4015 * initialise them to zero for new inodes. */
4016 if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
4017 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
4018
4019 ext4_get_inode_flags(ei);
4020 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4021 if (!(test_opt(inode->i_sb, NO_UID32))) {
4022 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
4023 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
4024 /*
4025 * Fix up interoperability with old kernels. Otherwise, old inodes get
4026 * re-used with the upper 16 bits of the uid/gid intact
4027 */
4028 if (!ei->i_dtime) {
4029 raw_inode->i_uid_high =
4030 cpu_to_le16(high_16_bits(inode->i_uid));
4031 raw_inode->i_gid_high =
4032 cpu_to_le16(high_16_bits(inode->i_gid));
4033 } else {
4034 raw_inode->i_uid_high = 0;
4035 raw_inode->i_gid_high = 0;
4036 }
4037 } else {
4038 raw_inode->i_uid_low =
4039 cpu_to_le16(fs_high2lowuid(inode->i_uid));
4040 raw_inode->i_gid_low =
4041 cpu_to_le16(fs_high2lowgid(inode->i_gid));
4042 raw_inode->i_uid_high = 0;
4043 raw_inode->i_gid_high = 0;
4044 }
4045 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4046
4047 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4048 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4049 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4050 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4051
4052 if (ext4_inode_blocks_set(handle, raw_inode, ei))
4053 goto out_brelse;
4054 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4055 raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
4056 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
4057 cpu_to_le32(EXT4_OS_HURD))
4058 raw_inode->i_file_acl_high =
4059 cpu_to_le16(ei->i_file_acl >> 32);
4060 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4061 ext4_isize_set(raw_inode, ei->i_disksize);
4062 if (ei->i_disksize > 0x7fffffffULL) {
4063 struct super_block *sb = inode->i_sb;
4064 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
4065 EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
4066 EXT4_SB(sb)->s_es->s_rev_level ==
4067 cpu_to_le32(EXT4_GOOD_OLD_REV)) {
4068 /* If this is the first large file
4069 * created, add a flag to the superblock.
4070 */
4071 err = ext4_journal_get_write_access(handle,
4072 EXT4_SB(sb)->s_sbh);
4073 if (err)
4074 goto out_brelse;
4075 ext4_update_dynamic_rev(sb);
4076 EXT4_SET_RO_COMPAT_FEATURE(sb,
4077 EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
4078 sb->s_dirt = 1;
4079 ext4_handle_sync(handle);
4080 err = ext4_handle_dirty_metadata(handle, NULL,
4081 EXT4_SB(sb)->s_sbh);
4082 }
4083 }
4084 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4085 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4086 if (old_valid_dev(inode->i_rdev)) {
4087 raw_inode->i_block[0] =
4088 cpu_to_le32(old_encode_dev(inode->i_rdev));
4089 raw_inode->i_block[1] = 0;
4090 } else {
4091 raw_inode->i_block[0] = 0;
4092 raw_inode->i_block[1] =
4093 cpu_to_le32(new_encode_dev(inode->i_rdev));
4094 raw_inode->i_block[2] = 0;
4095 }
4096 } else
4097 for (block = 0; block < EXT4_N_BLOCKS; block++)
4098 raw_inode->i_block[block] = ei->i_data[block];
4099
4100 raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
4101 if (ei->i_extra_isize) {
4102 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4103 raw_inode->i_version_hi =
4104 cpu_to_le32(inode->i_version >> 32);
4105 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
4106 }
4107
4108 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4109 rc = ext4_handle_dirty_metadata(handle, NULL, bh);
4110 if (!err)
4111 err = rc;
4112 ext4_clear_inode_state(inode, EXT4_STATE_NEW);
4113
4114 ext4_update_inode_fsync_trans(handle, inode, 0);
4115 out_brelse:
4116 brelse(bh);
4117 ext4_std_error(inode->i_sb, err);
4118 return err;
4119 }
4120
4121 /*
4122 * ext4_write_inode()
4123 *
4124 * We are called from a few places:
4125 *
4126 * - Within generic_file_write() for O_SYNC files.
4127 * Here, there will be no transaction running. We wait for any running
4128 * transaction to commit.
4129 *
4130 * - Within sys_sync(), kupdate and such.
4131 * We wait on commit, if told to.
4132 *
4133 * - Within prune_icache() (PF_MEMALLOC == true)
4134 * Here we simply return. We can't afford to block kswapd on the
4135 * journal commit.
4136 *
4137 * In all cases it is actually safe for us to return without doing anything,
4138 * because the inode has been copied into a raw inode buffer in
4139 * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
4140 * knfsd.
4141 *
4142 * Note that we are absolutely dependent upon all inode dirtiers doing the
4143 * right thing: they *must* call mark_inode_dirty() after dirtying info in
4144 * which we are interested.
4145 *
4146 * It would be a bug for them to not do this. The code:
4147 *
4148 * mark_inode_dirty(inode)
4149 * stuff();
4150 * inode->i_size = expr;
4151 *
4152 * is in error because a kswapd-driven write_inode() could occur while
4153 * `stuff()' is running, and the new i_size will be lost. Plus the inode
4154 * will no longer be on the superblock's dirty inode list.
4155 */
4156 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4157 {
4158 int err;
4159
4160 if (current->flags & PF_MEMALLOC)
4161 return 0;
4162
4163 if (EXT4_SB(inode->i_sb)->s_journal) {
4164 if (ext4_journal_current_handle()) {
4165 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4166 dump_stack();
4167 return -EIO;
4168 }
4169
4170 if (wbc->sync_mode != WB_SYNC_ALL)
4171 return 0;
4172
4173 err = ext4_force_commit(inode->i_sb);
4174 } else {
4175 struct ext4_iloc iloc;
4176
4177 err = __ext4_get_inode_loc(inode, &iloc, 0);
4178 if (err)
4179 return err;
4180 if (wbc->sync_mode == WB_SYNC_ALL)
4181 sync_dirty_buffer(iloc.bh);
4182 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
4183 EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
4184 "IO error syncing inode");
4185 err = -EIO;
4186 }
4187 brelse(iloc.bh);
4188 }
4189 return err;
4190 }
4191
4192 /*
4193 * ext4_setattr()
4194 *
4195 * Called from notify_change.
4196 *
4197 * We want to trap VFS attempts to truncate the file as soon as
4198 * possible. In particular, we want to make sure that when the VFS
4199 * shrinks i_size, we put the inode on the orphan list and modify
4200 * i_disksize immediately, so that during the subsequent flushing of
4201 * dirty pages and freeing of disk blocks, we can guarantee that any
4202 * commit will leave the blocks being flushed in an unused state on
4203 * disk. (On recovery, the inode will get truncated and the blocks will
4204 * be freed, so we have a strong guarantee that no future commit will
4205 * leave these blocks visible to the user.)
4206 *
4207 * Another thing we have to assure is that if we are in ordered mode
4208 * and the inode is still attached to the committing transaction, we
4209 * must start writeout of all the dirty pages which are being truncated.
4210 * This way we are sure that all the data written in the previous
4211 * transaction are already on disk (truncate waits for pages under
4212 * writeback).
4213 *
4214 * Called with inode->i_mutex down.
4215 */
4216 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4217 {
4218 struct inode *inode = dentry->d_inode;
4219 int error, rc = 0;
4220 int orphan = 0;
4221 const unsigned int ia_valid = attr->ia_valid;
4222
4223 error = inode_change_ok(inode, attr);
4224 if (error)
4225 return error;
4226
4227 if (is_quota_modification(inode, attr))
4228 dquot_initialize(inode);
4229 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
4230 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
4231 handle_t *handle;
4232
4233 /* (user+group)*(old+new) structure, inode write (sb,
4234 * inode block, ? - but truncate inode update has it) */
4235 handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
4236 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
4237 if (IS_ERR(handle)) {
4238 error = PTR_ERR(handle);
4239 goto err_out;
4240 }
4241 error = dquot_transfer(inode, attr);
4242 if (error) {
4243 ext4_journal_stop(handle);
4244 return error;
4245 }
4246 /* Update corresponding info in inode so that everything is in
4247 * one transaction */
4248 if (attr->ia_valid & ATTR_UID)
4249 inode->i_uid = attr->ia_uid;
4250 if (attr->ia_valid & ATTR_GID)
4251 inode->i_gid = attr->ia_gid;
4252 error = ext4_mark_inode_dirty(handle, inode);
4253 ext4_journal_stop(handle);
4254 }
4255
4256 if (attr->ia_valid & ATTR_SIZE) {
4257 inode_dio_wait(inode);
4258
4259 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4260 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4261
4262 if (attr->ia_size > sbi->s_bitmap_maxbytes)
4263 return -EFBIG;
4264 }
4265 }
4266
4267 if (S_ISREG(inode->i_mode) &&
4268 attr->ia_valid & ATTR_SIZE &&
4269 (attr->ia_size < inode->i_size)) {
4270 handle_t *handle;
4271
4272 handle = ext4_journal_start(inode, 3);
4273 if (IS_ERR(handle)) {
4274 error = PTR_ERR(handle);
4275 goto err_out;
4276 }
4277 if (ext4_handle_valid(handle)) {
4278 error = ext4_orphan_add(handle, inode);
4279 orphan = 1;
4280 }
4281 EXT4_I(inode)->i_disksize = attr->ia_size;
4282 rc = ext4_mark_inode_dirty(handle, inode);
4283 if (!error)
4284 error = rc;
4285 ext4_journal_stop(handle);
4286
4287 if (ext4_should_order_data(inode)) {
4288 error = ext4_begin_ordered_truncate(inode,
4289 attr->ia_size);
4290 if (error) {
4291 /* Do as much error cleanup as possible */
4292 handle = ext4_journal_start(inode, 3);
4293 if (IS_ERR(handle)) {
4294 ext4_orphan_del(NULL, inode);
4295 goto err_out;
4296 }
4297 ext4_orphan_del(handle, inode);
4298 orphan = 0;
4299 ext4_journal_stop(handle);
4300 goto err_out;
4301 }
4302 }
4303 }
4304
4305 if (attr->ia_valid & ATTR_SIZE) {
4306 if (attr->ia_size != i_size_read(inode)) {
4307 truncate_setsize(inode, attr->ia_size);
4308 ext4_truncate(inode);
4309 } else if (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
4310 ext4_truncate(inode);
4311 }
4312
4313 if (!rc) {
4314 setattr_copy(inode, attr);
4315 mark_inode_dirty(inode);
4316 }
4317
4318 /*
4319 * If the call to ext4_truncate failed to get a transaction handle at
4320 * all, we need to clean up the in-core orphan list manually.
4321 */
4322 if (orphan && inode->i_nlink)
4323 ext4_orphan_del(NULL, inode);
4324
4325 if (!rc && (ia_valid & ATTR_MODE))
4326 rc = ext4_acl_chmod(inode);
4327
4328 err_out:
4329 ext4_std_error(inode->i_sb, error);
4330 if (!error)
4331 error = rc;
4332 return error;
4333 }
4334
4335 int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
4336 struct kstat *stat)
4337 {
4338 struct inode *inode;
4339 unsigned long delalloc_blocks;
4340
4341 inode = dentry->d_inode;
4342 generic_fillattr(inode, stat);
4343
4344 /*
4345 * We can't update i_blocks if the block allocation is delayed
4346 * otherwise in the case of system crash before the real block
4347 * allocation is done, we will have i_blocks inconsistent with
4348 * on-disk file blocks.
4349 * We always keep i_blocks updated together with real
4350 * allocation. But so as not to confuse userspace, stat
4351 * will return blocks that include the delayed allocation
4352 * blocks for this file.
4353 */
4354 delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
4355
4356 stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
4357 return 0;
4358 }
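
/*
 * A worked example of the conversion above, assuming a 4k block size:
 * 10 reserved delalloc blocks add (10 << 12) >> 9 = 80 extra 512-byte
 * sectors to stat->blocks.
 */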
4359
4360 static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4361 {
4362 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4363 return ext4_ind_trans_blocks(inode, nrblocks, chunk);
4364 return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
4365 }
4366
4367 /*
4368 * Account for index blocks, block group bitmaps and block group
4369 * descriptor blocks when modifying data blocks and index blocks; in
4370 * the worst case the index blocks spread over different block groups.
4371 *
4372 * If the data blocks are discontiguous, they may spread over
4373 * different block groups too. If they are contiguous, with flexbg
4374 * they could still cross a block group boundary.
4375 *
4376 * Also account for superblock, inode, quota and xattr blocks
4377 */
4378 static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4379 {
4380 ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
4381 int gdpblocks;
4382 int idxblocks;
4383 int ret = 0;
4384
4385 /*
4386 * How many index blocks do we need to touch to modify nrblocks?
4387 * The "chunk" flag indicates whether the nrblocks are
4388 * physically contiguous on disk.
4389 *
4390 * Direct IO and fallocate call get_block to allocate a
4391 * single extent at a time, so they can set the "chunk" flag.
4392 */
4393 idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
4394
4395 ret = idxblocks;
4396
4397 /*
4398 * Now let's see how many group bitmaps and group descriptors need
4399 * to be accounted for.
4400 */
4401 groups = idxblocks;
4402 if (chunk)
4403 groups += 1;
4404 else
4405 groups += nrblocks;
4406
4407 gdpblocks = groups;
4408 if (groups > ngroups)
4409 groups = ngroups;
4410 if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4411 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4412
4413 /* bitmaps and block group descriptor blocks */
4414 ret += groups + gdpblocks;
4415
4416 /* Blocks for super block, inode, quota and xattr blocks */
4417 ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4418
4419 return ret;
4420 }
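
/*
 * A worked example of the accounting above, with assumed inputs: for
 * a contiguous chunk (chunk = 1) with idxblocks = 2, groups becomes
 * 2 + 1 = 3, so before clamping against ngroups and s_gdb_count the
 * estimate is 2 index blocks + 3 bitmap blocks + 3 group descriptor
 * blocks + EXT4_META_TRANS_BLOCKS(sb) for sb/inode/quota/xattr.
 */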
4421
4422 /*
4423 * Calculate the total number of credits to reserve to fit
4424 * the modification of a single page into a single transaction,
4425 * which may include multiple chunks of block allocations.
4426 *
4427 * This could be called via ext4_write_begin()
4428 *
4429 * We need to consider the worst case, when
4430 * we allocate one new block per extent.
4431 */
4432 int ext4_writepage_trans_blocks(struct inode *inode)
4433 {
4434 int bpp = ext4_journal_blocks_per_page(inode);
4435 int ret;
4436
4437 ret = ext4_meta_trans_blocks(inode, bpp, 0);
4438
4439 /* Account for data blocks for journalled mode */
4440 if (ext4_should_journal_data(inode))
4441 ret += bpp;
4442 return ret;
4443 }
4444
4445 /*
4446 * Calculate the journal credits for a chunk of data modification.
4447 *
4448 * This is called from DIO, fallocate or whatever else calls
4449 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
4450 *
4451 * journal buffers for data blocks are not included here, as DIO
4452 * and fallocate do not need to journal data buffers.
4453 */
4454 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
4455 {
4456 return ext4_meta_trans_blocks(inode, nrblocks, 1);
4457 }
4458
4459 /*
4460 * The caller must have previously called ext4_reserve_inode_write().
4461 * Given this, we know that the caller already has write access to iloc->bh.
4462 */
4463 int ext4_mark_iloc_dirty(handle_t *handle,
4464 struct inode *inode, struct ext4_iloc *iloc)
4465 {
4466 int err = 0;
4467
4468 if (test_opt(inode->i_sb, I_VERSION))
4469 inode_inc_iversion(inode);
4470
4471 /* the do_update_inode consumes one bh->b_count */
4472 get_bh(iloc->bh);
4473
4474 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
4475 err = ext4_do_update_inode(handle, inode, iloc);
4476 put_bh(iloc->bh);
4477 return err;
4478 }
4479
4480 /*
4481 * On success, we end up with an outstanding reference count against
4482 * iloc->bh. This _must_ be cleaned up later.
4483 */
4484
4485 int
4486 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4487 struct ext4_iloc *iloc)
4488 {
4489 int err;
4490
4491 err = ext4_get_inode_loc(inode, iloc);
4492 if (!err) {
4493 BUFFER_TRACE(iloc->bh, "get_write_access");
4494 err = ext4_journal_get_write_access(handle, iloc->bh);
4495 if (err) {
4496 brelse(iloc->bh);
4497 iloc->bh = NULL;
4498 }
4499 }
4500 ext4_std_error(inode->i_sb, err);
4501 return err;
4502 }
4503
4504 /*
4505 * Expand an inode by new_extra_isize bytes.
4506 * Returns 0 on success or negative error number on failure.
4507 */
4508 static int ext4_expand_extra_isize(struct inode *inode,
4509 unsigned int new_extra_isize,
4510 struct ext4_iloc iloc,
4511 handle_t *handle)
4512 {
4513 struct ext4_inode *raw_inode;
4514 struct ext4_xattr_ibody_header *header;
4515
4516 if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
4517 return 0;
4518
4519 raw_inode = ext4_raw_inode(&iloc);
4520
4521 header = IHDR(inode, raw_inode);
4522
4523 /* No extended attributes present */
4524 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
4525 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
4526 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
4527 new_extra_isize);
4528 EXT4_I(inode)->i_extra_isize = new_extra_isize;
4529 return 0;
4530 }
4531
4532 /* try to expand with EAs present */
4533 return ext4_expand_extra_isize_ea(inode, new_extra_isize,
4534 raw_inode, handle);
4535 }
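
/*
 * For example, with an assumed 256-byte on-disk inode and the
 * 128-byte EXT4_GOOD_OLD_INODE_SIZE base, growing i_extra_isize only
 * requires zeroing part of the trailing 128 bytes when no in-inode
 * xattrs are present; otherwise the xattr entries must be shifted,
 * which ext4_expand_extra_isize_ea() handles.
 */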
4536
4537 /*
4538 * What we do here is to mark the in-core inode as clean with respect to inode
4539 * dirtiness (it may still be data-dirty).
4540 * This means that the in-core inode may be reaped by prune_icache
4541 * without having to perform any I/O. This is a very good thing,
4542 * because *any* task may call prune_icache - even ones which
4543 * have a transaction open against a different journal.
4544 *
4545 * Is this cheating? Not really. Sure, we haven't written the
4546 * inode out, but prune_icache isn't a user-visible syncing function.
4547 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
4548 * we start and wait on commits.
4549 *
4550 * Is this efficient/effective? Well, we're being nice to the system
4551 * by cleaning up our inodes proactively so they can be reaped
4552 * without I/O. But we are potentially leaving up to five seconds'
4553 * worth of inodes floating about which prune_icache wants us to
4554 * write out. One way to fix that would be to get prune_icache()
4555 * to do a write_super() to free up some memory. It has the desired
4556 * effect.
4557 */
4558 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4559 {
4560 struct ext4_iloc iloc;
4561 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4562 static unsigned int mnt_count;
4563 int err, ret;
4564
4565 might_sleep();
4566 trace_ext4_mark_inode_dirty(inode, _RET_IP_);
4567 err = ext4_reserve_inode_write(handle, inode, &iloc);
4568 if (ext4_handle_valid(handle) &&
4569 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
4570 !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
4571 /*
4572 * We need extra buffer credits since we may write into EA block
4573 * with this same handle. If journal_extend fails, then it will
4574 * only result in a minor loss of functionality for that inode.
4575 * If this is felt to be critical, then e2fsck should be run to
4576 * force a large enough s_min_extra_isize.
4577 */
4578 if ((jbd2_journal_extend(handle,
4579 EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
4580 ret = ext4_expand_extra_isize(inode,
4581 sbi->s_want_extra_isize,
4582 iloc, handle);
4583 if (ret) {
4584 ext4_set_inode_state(inode,
4585 EXT4_STATE_NO_EXPAND);
4586 if (mnt_count !=
4587 le16_to_cpu(sbi->s_es->s_mnt_count)) {
4588 ext4_warning(inode->i_sb,
4589 "Unable to expand inode %lu. Delete"
4590 " some EAs or run e2fsck.",
4591 inode->i_ino);
4592 mnt_count =
4593 le16_to_cpu(sbi->s_es->s_mnt_count);
4594 }
4595 }
4596 }
4597 }
4598 if (!err)
4599 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
4600 return err;
4601 }
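
#if 0
/*
 * A minimal sketch of the usual calling pattern, for illustration:
 * in-core inode fields are updated under a running handle and then
 * published with ext4_mark_inode_dirty(). The credit count here is
 * hypothetical.
 */
static int touch_inode_example(struct inode *inode)
{
	handle_t *handle;
	int err;

	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	inode->i_ctime = ext4_current_time(inode);
	err = ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
	return err;
}
#endif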
4602
4603 /*
4604 * ext4_dirty_inode() is called from __mark_inode_dirty()
4605 *
4606 * We're really interested in the case where a file is being extended.
4607 * i_size has been changed by generic_commit_write() and we thus need
4608 * to include the updated inode in the current transaction.
4609 *
4610 * Also, dquot_alloc_block() will always dirty the inode when blocks
4611 * are allocated to the file.
4612 *
4613 * If the inode is marked synchronous, we don't honour that here - doing
4614 * so would cause a commit on atime updates, which we don't bother doing.
4615 * We handle synchronous inodes at the highest possible level.
4616 */
4617 void ext4_dirty_inode(struct inode *inode, int flags)
4618 {
4619 handle_t *handle;
4620
4621 handle = ext4_journal_start(inode, 2);
4622 if (IS_ERR(handle))
4623 goto out;
4624
4625 ext4_mark_inode_dirty(handle, inode);
4626
4627 ext4_journal_stop(handle);
4628 out:
4629 return;
4630 }
4631
4632 #if 0
4633 /*
4634 * Bind an inode's backing buffer_head into this transaction, to prevent
4635 * it from being flushed to disk early. Unlike
4636 * ext4_reserve_inode_write, this leaves behind no bh reference and
4637 * returns no iloc structure, so the caller needs to repeat the iloc
4638 * lookup to mark the inode dirty later.
4639 */
4640 static int ext4_pin_inode(handle_t *handle, struct inode *inode)
4641 {
4642 struct ext4_iloc iloc;
4643
4644 int err = 0;
4645 if (handle) {
4646 err = ext4_get_inode_loc(inode, &iloc);
4647 if (!err) {
4648 BUFFER_TRACE(iloc.bh, "get_write_access");
4649 err = jbd2_journal_get_write_access(handle, iloc.bh);
4650 if (!err)
4651 err = ext4_handle_dirty_metadata(handle,
4652 NULL,
4653 iloc.bh);
4654 brelse(iloc.bh);
4655 }
4656 }
4657 ext4_std_error(inode->i_sb, err);
4658 return err;
4659 }
4660 #endif
4661
4662 int ext4_change_inode_journal_flag(struct inode *inode, int val)
4663 {
4664 journal_t *journal;
4665 handle_t *handle;
4666 int err;
4667
4668 /*
4669 * We have to be very careful here: changing a data block's
4670 * journaling status dynamically is dangerous. If we write a
4671 * data block to the journal, change the status and then delete
4672 * that block, we risk forgetting to revoke the old log record
4673 * from the journal and so a subsequent replay can corrupt data.
4674 * So, first we make sure that the journal is empty and that
4675 * nobody is changing anything.
4676 */
4677
4678 journal = EXT4_JOURNAL(inode);
4679 if (!journal)
4680 return 0;
4681 if (is_journal_aborted(journal))
4682 return -EROFS;
4683
4684 jbd2_journal_lock_updates(journal);
4685 jbd2_journal_flush(journal);
4686
4687 /*
4688 * OK, there are no updates running now, and all cached data is
4689 * synced to disk. We are now in a completely consistent state
4690 * which doesn't have anything in the journal, and we know that
4691 * no filesystem updates are running, so it is safe to modify
4692 * the inode's in-core data-journaling state flag now.
4693 */
4694
4695 if (val)
4696 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
4697 else
4698 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
4699 ext4_set_aops(inode);
4700
4701 jbd2_journal_unlock_updates(journal);
4702
4703 /* Finally we can mark the inode as dirty. */
4704
4705 handle = ext4_journal_start(inode, 1);
4706 if (IS_ERR(handle))
4707 return PTR_ERR(handle);
4708
4709 err = ext4_mark_inode_dirty(handle, inode);
4710 ext4_handle_sync(handle);
4711 ext4_journal_stop(handle);
4712 ext4_std_error(inode->i_sb, err);
4713
4714 return err;
4715 }
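
/*
 * ext4_change_inode_journal_flag() above is typically reached from
 * ext4_ioctl() when userspace toggles the per-inode journal-data
 * attribute, e.g. via "chattr +j".
 */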
4716
4717 static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
4718 {
4719 return !buffer_mapped(bh);
4720 }
4721
4722 int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4723 {
4724 struct page *page = vmf->page;
4725 loff_t size;
4726 unsigned long len;
4727 int ret;
4728 struct file *file = vma->vm_file;
4729 struct inode *inode = file->f_path.dentry->d_inode;
4730 struct address_space *mapping = inode->i_mapping;
4731 handle_t *handle;
4732 get_block_t *get_block;
4733 int retries = 0;
4734
4735 /*
4736 * This check is racy but catches the common case. We rely on
4737 * __block_page_mkwrite() to do a reliable check.
4738 */
4739 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
4740 /* Delalloc case is easy... */
4741 if (test_opt(inode->i_sb, DELALLOC) &&
4742 !ext4_should_journal_data(inode) &&
4743 !ext4_nonda_switch(inode->i_sb)) {
4744 do {
4745 ret = __block_page_mkwrite(vma, vmf,
4746 ext4_da_get_block_prep);
4747 } while (ret == -ENOSPC &&
4748 ext4_should_retry_alloc(inode->i_sb, &retries));
4749 goto out_ret;
4750 }
4751
4752 lock_page(page);
4753 size = i_size_read(inode);
4754 /* Page got truncated from under us? */
4755 if (page->mapping != mapping || page_offset(page) > size) {
4756 unlock_page(page);
4757 ret = VM_FAULT_NOPAGE;
4758 goto out;
4759 }
4760
4761 if (page->index == size >> PAGE_CACHE_SHIFT)
4762 len = size & ~PAGE_CACHE_MASK;
4763 else
4764 len = PAGE_CACHE_SIZE;
4765 /*
4766 * Return if we have all the buffers mapped. This avoids the need to do
4767 * journal_start/journal_stop which can block and take a long time
4768 */
4769 if (page_has_buffers(page)) {
4770 if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
4771 ext4_bh_unmapped)) {
4772 /* Wait so that we don't change page under IO */
4773 wait_on_page_writeback(page);
4774 ret = VM_FAULT_LOCKED;
4775 goto out;
4776 }
4777 }
4778 unlock_page(page);
4779 /* OK, we need to fill the hole... */
4780 if (ext4_should_dioread_nolock(inode))
4781 get_block = ext4_get_block_write;
4782 else
4783 get_block = ext4_get_block;
4784 retry_alloc:
4785 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
4786 if (IS_ERR(handle)) {
4787 ret = VM_FAULT_SIGBUS;
4788 goto out;
4789 }
4790 ret = __block_page_mkwrite(vma, vmf, get_block);
4791 if (!ret && ext4_should_journal_data(inode)) {
4792 if (walk_page_buffers(handle, page_buffers(page), 0,
4793 PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
4794 unlock_page(page);
4795 ret = VM_FAULT_SIGBUS;
4796 ext4_journal_stop(handle);
4797 goto out;
4798 }
4799 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
4800 }
4801 ext4_journal_stop(handle);
4802 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
4803 goto retry_alloc;
4804 out_ret:
4805 ret = block_page_mkwrite_return(ret);
4806 out:
4807 return ret;
4808 }