ext4: defer clearing of PageWriteback after extent conversion
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/aio.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;

int __init ext4_init_pageio(void)
{
        io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
        if (io_end_cachep == NULL)
                return -ENOMEM;
        return 0;
}

void ext4_exit_pageio(void)
{
        kmem_cache_destroy(io_end_cachep);
}

/*
 * This function is called by ext4_evict_inode() to make sure there is
 * no more pending I/O completion work left to do.
 */
void ext4_ioend_shutdown(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
        /*
         * We need to make sure the work structure is finished being
         * used before we let the inode get destroyed.
         */
        if (work_pending(&EXT4_I(inode)->i_rsv_conversion_work))
                cancel_work_sync(&EXT4_I(inode)->i_rsv_conversion_work);
        if (work_pending(&EXT4_I(inode)->i_unrsv_conversion_work))
                cancel_work_sync(&EXT4_I(inode)->i_unrsv_conversion_work);
}

/*
 * Print a buffer I/O error message compatible with the one in
 * fs/buffer.c. This provides compatibility with dmesg scrapers that
 * look for a specific buffer I/O error message. We really need a
 * unified error reporting structure to userspace ala Digital Unix's
 * uerf system, but it's probably not going to happen in my lifetime,
 * due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];

        printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
               bdevname(bh->b_bdev, b),
               (unsigned long long)bh->b_blocknr);
}

static void ext4_finish_bio(struct bio *bio)
{
        int i;
        int error = !test_bit(BIO_UPTODATE, &bio->bi_flags);

        for (i = 0; i < bio->bi_vcnt; i++) {
                struct bio_vec *bvec = &bio->bi_io_vec[i];
                struct page *page = bvec->bv_page;
                struct buffer_head *bh, *head;
                unsigned bio_start = bvec->bv_offset;
                unsigned bio_end = bio_start + bvec->bv_len;
                unsigned under_io = 0;
                unsigned long flags;

                if (!page)
                        continue;

                if (error) {
                        SetPageError(page);
                        set_bit(AS_EIO, &page->mapping->flags);
                }
                bh = head = page_buffers(page);
                /*
                 * We check all buffers in the page under BH_Uptodate_Lock
                 * to avoid races with other end io clearing async_write flags
                 */
                local_irq_save(flags);
                bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
                do {
                        if (bh_offset(bh) < bio_start ||
                            bh_offset(bh) + bh->b_size > bio_end) {
                                if (buffer_async_write(bh))
                                        under_io++;
                                continue;
                        }
                        clear_buffer_async_write(bh);
                        if (error)
                                buffer_io_error(bh);
                } while ((bh = bh->b_this_page) != head);
                bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
                local_irq_restore(flags);
                if (!under_io)
                        end_page_writeback(page);
        }
}

static void ext4_release_io_end(ext4_io_end_t *io_end)
{
        struct bio *bio, *next_bio;

        BUG_ON(!list_empty(&io_end->list));
        BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
        WARN_ON(io_end->handle);

        if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count))
                wake_up_all(ext4_ioend_wq(io_end->inode));

        for (bio = io_end->bio; bio; bio = next_bio) {
                next_bio = bio->bi_private;
                ext4_finish_bio(bio);
                bio_put(bio);
        }
        if (io_end->flag & EXT4_IO_END_DIRECT)
                inode_dio_done(io_end->inode);
        if (io_end->iocb)
                aio_complete(io_end->iocb, io_end->result, 0);
        kmem_cache_free(io_end_cachep, io_end);
}

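/*
 * This is where the deferral named in the commit subject takes effect:
 * for an unwritten io_end, completed bios are merely chained onto
 * io_end->bio in ext4_end_bio(), and ext4_finish_bio() - which ends
 * page writeback - runs only here, after the unwritten extent
 * conversion is done. Anyone waiting on PageWriteback therefore also
 * waits for the conversion to finish.
 */
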
static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
{
        struct inode *inode = io_end->inode;

        io_end->flag &= ~EXT4_IO_END_UNWRITTEN;
        /* Wake up anyone waiting on unwritten extent conversion */
        if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
                wake_up_all(ext4_ioend_wq(inode));
}

/* Check a range of space and convert unwritten extents to written. */
static int ext4_end_io(ext4_io_end_t *io)
{
        struct inode *inode = io->inode;
        loff_t offset = io->offset;
        ssize_t size = io->size;
        handle_t *handle = io->handle;
        int ret = 0;

        ext4_debug("ext4_end_io: io 0x%p from inode %lu,list->next 0x%p,"
                   "list->prev 0x%p\n",
                   io, inode->i_ino, io->list.next, io->list.prev);

        io->handle = NULL;      /* Following call will use up the handle */
        ret = ext4_convert_unwritten_extents(handle, inode, offset, size);
        if (ret < 0) {
                ext4_msg(inode->i_sb, KERN_EMERG,
                         "failed to convert unwritten extents to written "
                         "extents -- potential data loss! "
                         "(inode %lu, offset %llu, size %zd, error %d)",
                         inode->i_ino, offset, size, ret);
        }
        ext4_clear_io_unwritten_flag(io);
        ext4_release_io_end(io);
        return ret;
}

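/*
 * Background on the conversion above: blocks preallocated with
 * fallocate() (and blocks allocated during writeout of delayed or
 * direct I/O data) are first recorded as "unwritten" extents, which
 * read back as zeroes. Only after the data has actually reached the
 * disk may the extent be flipped to "written"; converting earlier
 * could expose stale on-disk data to readers.
 */
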
static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef EXT4FS_DEBUG
        struct list_head *cur, *before, *after;
        ext4_io_end_t *io, *io0, *io1;

        if (list_empty(head))
                return;

        ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
        list_for_each_entry(io, head, list) {
                cur = &io->list;
                before = cur->prev;
                io0 = container_of(before, ext4_io_end_t, list);
                after = cur->next;
                io1 = container_of(after, ext4_io_end_t, list);

                ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
                           io, inode->i_ino, io0, io1);
        }
#endif
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
        struct ext4_inode_info *ei = EXT4_I(io_end->inode);
        struct workqueue_struct *wq;
        unsigned long flags;

        BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        if (io_end->handle) {
                wq = EXT4_SB(io_end->inode->i_sb)->rsv_conversion_wq;
                if (list_empty(&ei->i_rsv_conversion_list))
                        queue_work(wq, &ei->i_rsv_conversion_work);
                list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
        } else {
                wq = EXT4_SB(io_end->inode->i_sb)->unrsv_conversion_wq;
                if (list_empty(&ei->i_unrsv_conversion_list))
                        queue_work(wq, &ei->i_unrsv_conversion_work);
                list_add_tail(&io_end->list, &ei->i_unrsv_conversion_list);
        }
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

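/*
 * Two lists (and workqueues) exist because an io_end may or may not
 * carry a transaction handle: writeback reserves journal credits up
 * front and sets io_end->handle (the "rsv" case), while completions
 * that must start their own transaction at conversion time take the
 * "unrsv" path. Keeping them apart means work that only spends already
 * reserved credits is not queued behind work that can block starting a
 * new transaction.
 */
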
static int ext4_do_flush_completed_IO(struct inode *inode,
                                      struct list_head *head)
{
        ext4_io_end_t *io;
        struct list_head unwritten;
        unsigned long flags;
        struct ext4_inode_info *ei = EXT4_I(inode);
        int err, ret = 0;

        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        dump_completed_IO(inode, head);
        list_replace_init(head, &unwritten);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

        while (!list_empty(&unwritten)) {
                io = list_entry(unwritten.next, ext4_io_end_t, list);
                BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
                list_del_init(&io->list);

                err = ext4_end_io(io);
                if (unlikely(!ret && err))
                        ret = err;
        }
        return ret;
}

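/*
 * Note the splice-under-lock pattern above: list_replace_init() moves
 * the whole completed list onto the private "unwritten" list while
 * i_completed_io_lock is held, and the io_ends are then converted with
 * the spinlock dropped, since ext4_end_io() may block in the journal.
 * New completions can keep arriving on the inode list in the meantime.
 */
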
/*
 * Work on completed IO, to convert unwritten extents to written extents.
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
        struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
                                                  i_rsv_conversion_work);
        ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

void ext4_end_io_unrsv_work(struct work_struct *work)
{
        struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
                                                  i_unrsv_conversion_work);
        ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_unrsv_conversion_list);
}

int ext4_flush_unwritten_io(struct inode *inode)
{
        int ret, err;

        WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) &&
                     !(inode->i_state & I_FREEING));
        ret = ext4_do_flush_completed_IO(inode,
                                         &EXT4_I(inode)->i_rsv_conversion_list);
        err = ext4_do_flush_completed_IO(inode,
                                         &EXT4_I(inode)->i_unrsv_conversion_list);
        if (!ret)
                ret = err;
        ext4_unwritten_wait(inode);
        return ret;
}

ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
        ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);

        if (io) {
                atomic_inc(&EXT4_I(inode)->i_ioend_count);
                io->inode = inode;
                INIT_LIST_HEAD(&io->list);
                atomic_set(&io->count, 1);
        }
        return io;
}

void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
        if (atomic_dec_and_test(&io_end->count)) {
                if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
                        ext4_release_io_end(io_end);
                        return;
                }
                ext4_add_complete_io(io_end);
        }
}

int ext4_put_io_end(ext4_io_end_t *io_end)
{
        int err = 0;

        if (atomic_dec_and_test(&io_end->count)) {
                if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
                        err = ext4_convert_unwritten_extents(io_end->handle,
                                                io_end->inode, io_end->offset,
                                                io_end->size);
                        io_end->handle = NULL;
                        ext4_clear_io_unwritten_flag(io_end);
                }
                ext4_release_io_end(io_end);
        }
        return err;
}

ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
        atomic_inc(&io_end->count);
        return io_end;
}

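/*
 * Reference counting sketch: ext4_init_io_end() returns an io_end with
 * count 1, and each bio submitted against it takes a further reference
 * via ext4_get_io_end() (see io_submit_init_bio() below). A bio
 * completion drops its reference with ext4_put_io_end_defer(), which
 * defers any unwritten extent conversion to a workqueue, while the
 * submitter drops its reference with ext4_put_io_end(), which may
 * convert synchronously. Whoever drops the last reference frees the
 * io_end.
 */
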
static void ext4_end_bio(struct bio *bio, int error)
{
        ext4_io_end_t *io_end = bio->bi_private;
        sector_t bi_sector = bio->bi_sector;

        BUG_ON(!io_end);
        bio->bi_end_io = NULL;
        if (test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = 0;

        if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
                /*
                 * Link bio into list hanging from io_end. We have to do it
                 * atomically as bio completions can be racing against each
                 * other.
                 */
                bio->bi_private = xchg(&io_end->bio, bio);
        } else {
                ext4_finish_bio(bio);
                bio_put(bio);
        }

        if (error) {
                struct inode *inode = io_end->inode;

                ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
                             "(offset %llu size %ld starting block %llu)",
                             inode->i_ino,
                             (unsigned long long) io_end->offset,
                             (long) io_end->size,
                             (unsigned long long)
                             bi_sector >> (inode->i_blkbits - 9));
        }
        ext4_put_io_end_defer(io_end);
}

void ext4_io_submit(struct ext4_io_submit *io)
{
        struct bio *bio = io->io_bio;

        if (bio) {
                bio_get(io->io_bio);
                submit_bio(io->io_op, io->io_bio);
                BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
                bio_put(io->io_bio);
        }
        io->io_bio = NULL;
}

void ext4_io_submit_init(struct ext4_io_submit *io,
                         struct writeback_control *wbc)
{
        io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
        io->io_bio = NULL;
        io->io_end = NULL;
}

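/*
 * Usage sketch (illustrative; the real call sites are the writeback
 * paths in fs/ext4/inode.c): set up a submission context, attach a
 * fresh io_end, feed it pages, then flush and drop the submitter's
 * reference:
 *
 *      struct ext4_io_submit io;
 *
 *      ext4_io_submit_init(&io, wbc);
 *      io.io_end = ext4_init_io_end(inode, GFP_NOFS);
 *      ret = ext4_bio_write_page(&io, page, len, wbc);
 *      ext4_io_submit(&io);
 *      ext4_put_io_end(io.io_end);
 */
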
static int io_submit_init_bio(struct ext4_io_submit *io,
                              struct buffer_head *bh)
{
        int nvecs = bio_get_nr_vecs(bh->b_bdev);
        struct bio *bio;

        bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
        bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        bio->bi_end_io = ext4_end_bio;
        bio->bi_private = ext4_get_io_end(io->io_end);
        io->io_bio = bio;
        io->io_next_block = bh->b_blocknr;
        return 0;
}

static int io_submit_add_bh(struct ext4_io_submit *io,
                            struct inode *inode,
                            struct buffer_head *bh)
{
        int ret;

        if (io->io_bio && bh->b_blocknr != io->io_next_block) {
                /* Discontiguous block: flush what we have so far */
submit_and_retry:
                ext4_io_submit(io);
        }
        if (io->io_bio == NULL) {
                ret = io_submit_init_bio(io, bh);
                if (ret)
                        return ret;
        }
        ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
        if (ret != bh->b_size)
                goto submit_and_retry;
        io->io_next_block++;
        return 0;
}

int ext4_bio_write_page(struct ext4_io_submit *io,
                        struct page *page,
                        int len,
                        struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        unsigned block_start, blocksize;
        struct buffer_head *bh, *head;
        int ret = 0;
        int nr_submitted = 0;

        blocksize = 1 << inode->i_blkbits;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));

        set_page_writeback(page);
        ClearPageError(page);

        /*
         * In the first loop we prepare and mark buffers to submit. We have to
         * mark all buffers in the page before submitting so that
         * end_page_writeback() cannot be called from ext4_finish_bio() when IO
         * on the first buffer finishes and we are still working on submitting
         * the second buffer.
         */
        bh = head = page_buffers(page);
        do {
                block_start = bh_offset(bh);
                if (block_start >= len) {
                        /*
                         * Comments copied from block_write_full_page_endio:
                         *
                         * The page straddles i_size. It must be zeroed out on
                         * each and every writepage invocation because it may
                         * be mmapped. "A file is mapped in multiples of the
                         * page size. For a file that is not a multiple of
                         * the page size, the remaining memory is zeroed when
                         * mapped, and writes to that region are not written
                         * out to the file."
                         */
                        zero_user_segment(page, block_start,
                                          block_start + blocksize);
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
                }
                if (!buffer_dirty(bh) || buffer_delay(bh) ||
                    !buffer_mapped(bh) || buffer_unwritten(bh)) {
                        /* A hole? We can safely clear the dirty bit */
                        if (!buffer_mapped(bh))
                                clear_buffer_dirty(bh);
                        if (io->io_bio)
                                ext4_io_submit(io);
                        continue;
                }
                if (buffer_new(bh)) {
                        clear_buffer_new(bh);
                        unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
                }
                set_buffer_async_write(bh);
        } while ((bh = bh->b_this_page) != head);

        /* Now submit buffers to write */
        bh = head = page_buffers(page);
        do {
                if (!buffer_async_write(bh))
                        continue;
                ret = io_submit_add_bh(io, inode, bh);
                if (ret) {
                        /*
                         * We only get here on ENOMEM. Not much else
                         * we can do but mark the page as dirty, and
                         * better luck next time.
                         */
                        redirty_page_for_writepage(wbc, page);
                        break;
                }
                nr_submitted++;
                clear_buffer_dirty(bh);
        } while ((bh = bh->b_this_page) != head);

        /* Error stopped previous loop? Clean up buffers... */
        if (ret) {
                do {
                        clear_buffer_async_write(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        unlock_page(page);
        /* Nothing submitted - we have to end page writeback */
        if (!nr_submitted)
                end_page_writeback(page);
        return ret;
}