/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * Types of I/O for bmap clustering and I/O completion tracking.
 */
enum {
        IO_READ,        /* mapping for a read */
        IO_DELAY,       /* mapping covers delalloc region */
        IO_UNWRITTEN,   /* mapping covers allocated but uninitialized data */
        IO_NEW          /* just allocated */
};

/*
 * Prime number of hash buckets since address is used as the key.
 */
#define NVSYNC          37
#define to_ioend_wq(v)  (&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
static wait_queue_head_t xfs_ioend_wq[NVSYNC];
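
/*
 * i_iocount counts in-flight ioends on each inode.  Rather than embed a
 * dedicated wait queue in every inode, waiters hash the inode address into
 * this small shared table; a hash collision can only cause the occasional
 * spurious wakeup, which is harmless because xfs_ioend_wait() re-checks
 * i_iocount before returning.
 */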

void
xfs_ioend_init(void)
{
        int i;

        for (i = 0; i < NVSYNC; i++)
                init_waitqueue_head(&xfs_ioend_wq[i]);
}

void
xfs_ioend_wait(
        xfs_inode_t     *ip)
{
        wait_queue_head_t *wq = to_ioend_wq(ip);

        wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
}

STATIC void
xfs_ioend_wake(
        xfs_inode_t     *ip)
{
        if (atomic_dec_and_test(&ip->i_iocount))
                wake_up(to_ioend_wq(ip));
}
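
/*
 * Every atomic_inc() of i_iocount (done in xfs_alloc_ioend()) must be
 * balanced by exactly one xfs_ioend_wake() call, otherwise xfs_ioend_wait()
 * can hang forever or return while I/O is still in flight.
 */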

void
xfs_count_page_state(
        struct page             *page,
        int                     *delalloc,
        int                     *unwritten)
{
        struct buffer_head      *bh, *head;

        *delalloc = *unwritten = 0;

        bh = head = page_buffers(page);
        do {
                if (buffer_unwritten(bh))
                        (*unwritten) = 1;
                else if (buffer_delay(bh))
                        (*delalloc) = 1;
        } while ((bh = bh->b_this_page) != head);
}

STATIC struct block_device *
xfs_find_bdev_for_inode(
        struct inode            *inode)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;

        if (XFS_IS_REALTIME_INODE(ip))
                return mp->m_rtdev_targp->bt_bdev;
        else
                return mp->m_ddev_targp->bt_bdev;
}
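
/*
 * Realtime inodes keep their file data on the separate realtime device;
 * only the metadata lives on the data device, hence the bdev selection
 * above.
 */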

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
        xfs_ioend_t             *ioend)
{
        struct buffer_head      *bh, *next;
        struct xfs_inode        *ip = XFS_I(ioend->io_inode);

        for (bh = ioend->io_buffer_head; bh; bh = next) {
                next = bh->b_private;
                bh->b_end_io(bh, !ioend->io_error);
        }

        /*
         * Volume managers supporting multiple paths can send back ENODEV
         * when the final path disappears.  In this case continuing to fill
         * the page cache with dirty data which cannot be written out is
         * evil, so prevent that.
         */
        if (unlikely(ioend->io_error == -ENODEV)) {
                xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
                                      __FILE__, __LINE__);
        }

        xfs_ioend_wake(ip);
        mempool_free(ioend, xfs_ioend_pool);
}

/*
 * If the end of the current ioend is beyond the current EOF,
 * return the new EOF value, otherwise zero.
 */
STATIC xfs_fsize_t
xfs_ioend_new_eof(
        xfs_ioend_t             *ioend)
{
        xfs_inode_t             *ip = XFS_I(ioend->io_inode);
        xfs_fsize_t             isize;
        xfs_fsize_t             bsize;

        bsize = ioend->io_offset + ioend->io_size;
        isize = MAX(ip->i_size, ip->i_new_size);
        isize = MIN(isize, bsize);
        return isize > ip->i_d.di_size ? isize : 0;
}
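
/*
 * Worked example: with an on-disk size (di_size) of 4096, an in-core i_size
 * of 8192 and an ioend covering offset 4096/size 4096, bsize is 8192, isize
 * clamps to min(max(8192, i_new_size), 8192) = 8192, and 8192 > 4096, so
 * the new EOF of 8192 is returned for the on-disk size update.
 */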

/*
 * Update on-disk file size now that data has been written to disk.  The
 * current in-memory file size is i_size.  If a write is beyond eof i_new_size
 * will be the intended file size until i_size is updated.  If this write does
 * not extend all the way to the valid file size then restrict this update to
 * the end of the write.
 *
 * This function does not block as blocking on the inode lock in IO completion
 * can lead to IO completion order dependency deadlocks.  If it can't get the
 * inode ilock it will return EAGAIN.  Callers must handle this.
 */
STATIC int
xfs_setfilesize(
        xfs_ioend_t             *ioend)
{
        xfs_inode_t             *ip = XFS_I(ioend->io_inode);
        xfs_fsize_t             isize;

        ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
        ASSERT(ioend->io_type != IO_READ);

        if (unlikely(ioend->io_error))
                return 0;

        if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
                return EAGAIN;

        isize = xfs_ioend_new_eof(ioend);
        if (isize) {
                ip->i_d.di_size = isize;
                xfs_mark_inode_dirty(ip);
        }

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return 0;
}

/*
 * Schedule IO completion handling on the final put of an ioend.
 */
STATIC void
xfs_finish_ioend(
        struct xfs_ioend        *ioend)
{
        if (atomic_dec_and_test(&ioend->io_remaining)) {
                if (ioend->io_type == IO_UNWRITTEN)
                        queue_work(xfsconvertd_workqueue, &ioend->io_work);
                else
                        queue_work(xfsdatad_workqueue, &ioend->io_work);
        }
}
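
/*
 * Unwritten extent conversion has to run a transaction, which is presumably
 * why it is queued on its own workqueue (xfsconvertd) rather than the
 * regular data I/O completion queue (xfsdatad): slow transactional
 * completion work then cannot back up behind ordinary completions.
 */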

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
        struct work_struct *work)
{
        xfs_ioend_t     *ioend = container_of(work, xfs_ioend_t, io_work);
        struct xfs_inode *ip = XFS_I(ioend->io_inode);
        int             error = 0;

        /*
         * For unwritten extents we need to issue transactions to convert a
         * range to normal written extents after the data I/O has finished.
         */
        if (ioend->io_type == IO_UNWRITTEN &&
            likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {

                error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
                                                  ioend->io_size);
                if (error)
                        ioend->io_error = error;
        }

        /*
         * We might have to update the on-disk file size after extending
         * writes.
         */
        if (ioend->io_type != IO_READ) {
                error = xfs_setfilesize(ioend);
                ASSERT(!error || error == EAGAIN);
        }

        /*
         * If we didn't complete processing of the ioend, requeue it to the
         * tail of the workqueue for another attempt later.  Otherwise
         * destroy it.
         */
        if (error == EAGAIN) {
                atomic_inc(&ioend->io_remaining);
                xfs_finish_ioend(ioend);
                /* ensure we don't spin on blocked ioends */
                delay(1);
        } else {
                if (ioend->io_iocb)
                        aio_complete(ioend->io_iocb, ioend->io_result, 0);
                xfs_destroy_ioend(ioend);
        }
}

/*
 * Call IO completion handling in caller context on the final put of an ioend.
 */
STATIC void
xfs_finish_ioend_sync(
        struct xfs_ioend        *ioend)
{
        if (atomic_dec_and_test(&ioend->io_remaining))
                xfs_end_io(&ioend->io_work);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
        struct inode            *inode,
        unsigned int            type)
{
        xfs_ioend_t             *ioend;

        ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

        /*
         * Set the count to 1 initially, which will prevent an I/O
         * completion callback from happening before we have started
         * all the I/O from calling the completion routine too early.
         */
        atomic_set(&ioend->io_remaining, 1);
        ioend->io_error = 0;
        ioend->io_list = NULL;
        ioend->io_type = type;
        ioend->io_inode = inode;
        ioend->io_buffer_head = NULL;
        ioend->io_buffer_tail = NULL;
        atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
        ioend->io_offset = 0;
        ioend->io_size = 0;
        ioend->io_iocb = NULL;
        ioend->io_result = 0;

        INIT_WORK(&ioend->io_work, xfs_end_io);
        return ioend;
}
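
/*
 * Note the reference bias: io_remaining starts at 1 here and each bio
 * submitted against the ioend takes a further reference in
 * xfs_submit_ioend_bio().  The initial reference is only dropped once all
 * I/O has been issued (xfs_finish_ioend() in xfs_submit_ioend()), so the
 * completion handler cannot run while bios are still being built.
 */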

STATIC int
xfs_map_blocks(
        struct inode            *inode,
        loff_t                  offset,
        ssize_t                 count,
        struct xfs_bmbt_irec    *imap,
        int                     flags)
{
        int                     nmaps = 1;
        int                     new = 0;

        return -xfs_iomap(XFS_I(inode), offset, count, flags, imap, &nmaps, &new);
}

STATIC int
xfs_imap_valid(
        struct inode            *inode,
        struct xfs_bmbt_irec    *imap,
        xfs_off_t               offset)
{
        offset >>= inode->i_blkbits;

        return offset >= imap->br_startoff &&
                offset < imap->br_startoff + imap->br_blockcount;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
        struct bio              *bio,
        int                     error)
{
        xfs_ioend_t             *ioend = bio->bi_private;

        ASSERT(atomic_read(&bio->bi_cnt) >= 1);
        ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

        /* Toss bio and pass work off to an xfsdatad thread */
        bio->bi_private = NULL;
        bio->bi_end_io = NULL;
        bio_put(bio);

        xfs_finish_ioend(ioend);
}
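
/*
 * This runs from bio completion (typically softirq) context, so nothing
 * transactional can happen here; all heavyweight work is deferred to the
 * completion workqueues via xfs_finish_ioend().
 */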

STATIC void
xfs_submit_ioend_bio(
        struct writeback_control *wbc,
        xfs_ioend_t             *ioend,
        struct bio              *bio)
{
        atomic_inc(&ioend->io_remaining);
        bio->bi_private = ioend;
        bio->bi_end_io = xfs_end_bio;

        /*
         * If the I/O is beyond EOF we mark the inode dirty immediately
         * but don't update the inode size until I/O completion.
         */
        if (xfs_ioend_new_eof(ioend))
                xfs_mark_inode_dirty(XFS_I(ioend->io_inode));

        submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
                   WRITE_SYNC_PLUG : WRITE, bio);
        ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
        bio_put(bio);
}
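
/*
 * WRITE_SYNC_PLUG submits the bio as a synchronous write without an
 * implicit queue unplug, so WB_SYNC_ALL data integrity writeback gets the
 * I/O scheduler's synchronous treatment while background writeback does
 * not.  The extra bio reference taken in xfs_alloc_ioend_bio() keeps the
 * bio alive for the flag check after submit_bio() returns.
 */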

STATIC struct bio *
xfs_alloc_ioend_bio(
        struct buffer_head      *bh)
{
        struct bio              *bio;
        int                     nvecs = bio_get_nr_vecs(bh->b_bdev);

        do {
                bio = bio_alloc(GFP_NOIO, nvecs);
                nvecs >>= 1;
        } while (!bio);

        ASSERT(bio->bi_private == NULL);
        bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        bio_get(bio);
        return bio;
}

STATIC void
xfs_start_buffer_writeback(
        struct buffer_head      *bh)
{
        ASSERT(buffer_mapped(bh));
        ASSERT(buffer_locked(bh));
        ASSERT(!buffer_delay(bh));
        ASSERT(!buffer_unwritten(bh));

        mark_buffer_async_write(bh);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
        struct page             *page,
        int                     clear_dirty,
        int                     buffers)
{
        ASSERT(PageLocked(page));
        ASSERT(!PageWriteback(page));
        if (clear_dirty)
                clear_page_dirty_for_io(page);
        set_page_writeback(page);
        unlock_page(page);
        /* If no buffers on the page are to be written, finish it here */
        if (!buffers)
                end_page_writeback(page);
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
        return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we go, we can end up with a page that only has buffers marked
 * async write, and I/O completion on it can occur before we mark the other
 * buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
        struct writeback_control *wbc,
        xfs_ioend_t             *ioend)
{
        xfs_ioend_t             *head = ioend;
        xfs_ioend_t             *next;
        struct buffer_head      *bh;
        struct bio              *bio;
        sector_t                lastblock = 0;

        /* Pass 1 - start writeback */
        do {
                next = ioend->io_list;
                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
                        xfs_start_buffer_writeback(bh);
                }
        } while ((ioend = next) != NULL);

        /* Pass 2 - submit I/O */
        ioend = head;
        do {
                next = ioend->io_list;
                bio = NULL;

                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

                        if (!bio) {
 retry:
                                bio = xfs_alloc_ioend_bio(bh);
                        } else if (bh->b_blocknr != lastblock + 1) {
                                xfs_submit_ioend_bio(wbc, ioend, bio);
                                goto retry;
                        }

                        if (bio_add_buffer(bio, bh) != bh->b_size) {
                                xfs_submit_ioend_bio(wbc, ioend, bio);
                                goto retry;
                        }

                        lastblock = bh->b_blocknr;
                }
                if (bio)
                        xfs_submit_ioend_bio(wbc, ioend, bio);
                xfs_finish_ioend(ioend);
        } while ((ioend = next) != NULL);
}
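
/*
 * The bio build loop above depends on buffers being chained onto an ioend
 * in ascending block order: a buffer that is not physically contiguous
 * with the previous one (b_blocknr != lastblock + 1), or that no longer
 * fits in the current bio, forces the current bio to be submitted and a
 * fresh one started via the retry label.
 */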

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
        xfs_ioend_t             *ioend)
{
        xfs_ioend_t             *next;
        struct buffer_head      *bh, *next_bh;

        do {
                next = ioend->io_list;
                bh = ioend->io_buffer_head;
                do {
                        next_bh = bh->b_private;
                        clear_buffer_async_write(bh);
                        unlock_buffer(bh);
                } while ((bh = next_bh) != NULL);

                xfs_ioend_wake(XFS_I(ioend->io_inode));
                mempool_free(ioend, xfs_ioend_pool);
        } while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return true if we've finished the given ioend.
 */
STATIC void
xfs_add_to_ioend(
        struct inode            *inode,
        struct buffer_head      *bh,
        xfs_off_t               offset,
        unsigned int            type,
        xfs_ioend_t             **result,
        int                     need_ioend)
{
        xfs_ioend_t             *ioend = *result;

        if (!ioend || need_ioend || type != ioend->io_type) {
                xfs_ioend_t     *previous = *result;

                ioend = xfs_alloc_ioend(inode, type);
                ioend->io_offset = offset;
                ioend->io_buffer_head = bh;
                ioend->io_buffer_tail = bh;
                if (previous)
                        previous->io_list = ioend;
                *result = ioend;
        } else {
                ioend->io_buffer_tail->b_private = bh;
                ioend->io_buffer_tail = bh;
        }

        bh->b_private = NULL;
        ioend->io_size += bh->b_size;
}
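
/*
 * Two singly-linked chains are in play here: buffers within one ioend are
 * linked through bh->b_private (with head/tail cached in io_buffer_head
 * and io_buffer_tail), while the consecutive ioends built for a single
 * writepage invocation are linked through io_list for batch submission in
 * xfs_submit_ioend().
 */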

STATIC void
xfs_map_buffer(
        struct inode            *inode,
        struct buffer_head      *bh,
        struct xfs_bmbt_irec    *imap,
        xfs_off_t               offset)
{
        sector_t                bn;
        struct xfs_mount        *m = XFS_I(inode)->i_mount;
        xfs_off_t               iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
        xfs_daddr_t             iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

        ASSERT(imap->br_startblock != HOLESTARTBLOCK);
        ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

        bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
              ((offset - iomap_offset) >> inode->i_blkbits);

        ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

        bh->b_blocknr = bn;
        set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
        struct inode            *inode,
        struct buffer_head      *bh,
        struct xfs_bmbt_irec    *imap,
        xfs_off_t               offset)
{
        ASSERT(imap->br_startblock != HOLESTARTBLOCK);
        ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

        lock_buffer(bh);
        xfs_map_buffer(inode, bh, imap, offset);
        bh->b_bdev = xfs_find_bdev_for_inode(inode);
        set_buffer_mapped(bh);
        clear_buffer_delay(bh);
        clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
        struct page             *page,
        unsigned int            pg_offset)
{
        struct buffer_head      *bh, *head;
        int                     ret = 0;

        if (PageWriteback(page))
                return 0;
        if (!PageDirty(page))
                return 0;
        if (!page->mapping)
                return 0;
        if (!page_has_buffers(page))
                return 0;

        bh = head = page_buffers(page);
        do {
                if (!buffer_uptodate(bh))
                        break;
                if (!buffer_mapped(bh))
                        break;
                ret += bh->b_size;
                if (ret >= pg_offset)
                        break;
        } while ((bh = bh->b_this_page) != head);

        return ret;
}

STATIC size_t
xfs_probe_cluster(
        struct inode            *inode,
        struct page             *startpage,
        struct buffer_head      *bh,
        struct buffer_head      *head)
{
        struct pagevec          pvec;
        pgoff_t                 tindex, tlast, tloff;
        size_t                  total = 0;
        int                     done = 0, i;

        /* First sum forwards in this page */
        do {
                if (!buffer_uptodate(bh) || !buffer_mapped(bh))
                        return total;
                total += bh->b_size;
        } while ((bh = bh->b_this_page) != head);

        /* if we reached the end of the page, sum forwards in following pages */
        tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
        tindex = startpage->index + 1;

        /* Prune this back to avoid pathological behavior */
        tloff = min(tlast, startpage->index + 64);

        pagevec_init(&pvec, 0);
        while (!done && tindex <= tloff) {
                unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

                if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
                        break;

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        size_t pg_offset, pg_len = 0;

                        if (tindex == tlast) {
                                pg_offset =
                                    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
                                if (!pg_offset) {
                                        done = 1;
                                        break;
                                }
                        } else
                                pg_offset = PAGE_CACHE_SIZE;

                        if (page->index == tindex && trylock_page(page)) {
                                pg_len = xfs_probe_page(page, pg_offset);
                                unlock_page(page);
                        }

                        if (!pg_len) {
                                done = 1;
                                break;
                        }

                        total += pg_len;
                        tindex++;
                }

                pagevec_release(&pvec);
                cond_resched();
        }

        return total;
}
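
/*
 * The probe is deliberately cheap: it only trylocks pages and gives up at
 * the first page that cannot be written in full, because clustering is
 * purely an optimisation to increase the amount of I/O issued from a
 * single xfs_vm_writepage() call.
 */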

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
        struct page             *page,
        unsigned int            type)
{
        if (PageWriteback(page))
                return 0;

        if (page->mapping && page_has_buffers(page)) {
                struct buffer_head      *bh, *head;
                int                     acceptable = 0;

                bh = head = page_buffers(page);
                do {
                        if (buffer_unwritten(bh))
                                acceptable = (type == IO_UNWRITTEN);
                        else if (buffer_delay(bh))
                                acceptable = (type == IO_DELAY);
                        else if (buffer_dirty(bh) && buffer_mapped(bh))
                                acceptable = (type == IO_NEW);
                        else
                                break;
                } while ((bh = bh->b_this_page) != head);

                if (acceptable)
                        return 1;
        }

        return 0;
}

/*
 * Allocate & map buffers for page given the extent map and write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
        struct inode            *inode,
        struct page             *page,
        loff_t                  tindex,
        struct xfs_bmbt_irec    *imap,
        xfs_ioend_t             **ioendp,
        struct writeback_control *wbc,
        int                     all_bh)
{
        struct buffer_head      *bh, *head;
        xfs_off_t               end_offset;
        unsigned long           p_offset;
        unsigned int            type;
        int                     len, page_dirty;
        int                     count = 0, done = 0, uptodate = 1;
        xfs_off_t               offset = page_offset(page);

        if (page->index != tindex)
                goto fail;
        if (!trylock_page(page))
                goto fail;
        if (PageWriteback(page))
                goto fail_unlock_page;
        if (page->mapping != inode->i_mapping)
                goto fail_unlock_page;
        if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
                goto fail_unlock_page;

        /*
         * page_dirty is initially a count of buffers on the page before
         * EOF and is decremented as we move each into a cleanable state.
         *
         * Derivation:
         *
         * End offset is the highest offset that this page should represent.
         * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
         * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
         * hence give us the correct page_dirty count.  On any other page,
         * it will be zero and in that case we need page_dirty to be the
         * count of buffers on the page.
         */
        end_offset = min_t(unsigned long long,
                        (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
                        i_size_read(inode));

        len = 1 << inode->i_blkbits;
        p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
                                        PAGE_CACHE_SIZE);
        p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
        page_dirty = p_offset / len;

        bh = head = page_buffers(page);
        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;
                if (!(PageUptodate(page) || buffer_uptodate(bh))) {
                        done = 1;
                        continue;
                }

                if (buffer_unwritten(bh) || buffer_delay(bh)) {
                        if (buffer_unwritten(bh))
                                type = IO_UNWRITTEN;
                        else
                                type = IO_DELAY;

                        if (!xfs_imap_valid(inode, imap, offset)) {
                                done = 1;
                                continue;
                        }

                        ASSERT(imap->br_startblock != HOLESTARTBLOCK);
                        ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

                        xfs_map_at_offset(inode, bh, imap, offset);
                        xfs_add_to_ioend(inode, bh, offset, type,
                                         ioendp, done);

                        page_dirty--;
                        count++;
                } else {
                        type = IO_NEW;
                        if (buffer_mapped(bh) && all_bh) {
                                lock_buffer(bh);
                                xfs_add_to_ioend(inode, bh, offset,
                                                type, ioendp, done);
                                count++;
                                page_dirty--;
                        } else {
                                done = 1;
                        }
                }
        } while (offset += len, (bh = bh->b_this_page) != head);

        if (uptodate && bh == head)
                SetPageUptodate(page);

        if (count) {
                if (--wbc->nr_to_write <= 0 &&
                    wbc->sync_mode == WB_SYNC_NONE)
                        done = 1;
        }
        xfs_start_page_writeback(page, !page_dirty, count);

        return done;
 fail_unlock_page:
        unlock_page(page);
 fail:
        return 1;
}
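
/*
 * Example of the page_dirty arithmetic above, assuming 4096 byte pages and
 * 512 byte blocks: on an interior page end_offset is page aligned, so
 * p_offset becomes PAGE_CACHE_SIZE and page_dirty = 4096/512 = 8, i.e. all
 * eight buffers must be moved to a cleanable state.  On a last page with
 * EOF 1000 bytes into it, p_offset is roundup(1000, 512) = 1024 and
 * page_dirty = 2: only the two buffers inside EOF are counted.
 */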

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by imap and following the start page.
 */
STATIC void
xfs_cluster_write(
        struct inode            *inode,
        pgoff_t                 tindex,
        struct xfs_bmbt_irec    *imap,
        xfs_ioend_t             **ioendp,
        struct writeback_control *wbc,
        int                     all_bh,
        pgoff_t                 tlast)
{
        struct pagevec          pvec;
        int                     done = 0, i;

        pagevec_init(&pvec, 0);
        while (!done && tindex <= tlast) {
                unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

                if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
                        break;

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        done = xfs_convert_page(inode, pvec.pages[i], tindex++,
                                        imap, ioendp, wbc, all_bh);
                        if (done)
                                break;
                }

                pagevec_release(&pvec);
                cond_resched();
        }
}

STATIC void
xfs_vm_invalidatepage(
        struct page             *page,
        unsigned long           offset)
{
        trace_xfs_invalidatepage(page->mapping->host, page, offset);
        block_invalidatepage(page, offset);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it.  Because they are delalloc, we can do this without needing
 * a transaction.  Indeed - if we get ENOSPC errors, we have to be able to do
 * this truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
        struct page             *page)
{
        struct inode            *inode = page->mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct buffer_head      *bh, *head;
        loff_t                  offset = page_offset(page);
        ssize_t                 len = 1 << inode->i_blkbits;

        if (!xfs_is_delayed_page(page, IO_DELAY))
                goto out_invalidate;

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                goto out_invalidate;

        xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
                "page discard on page %p, inode 0x%llx, offset %llu.",
                        page, ip->i_ino, offset);

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        bh = head = page_buffers(page);
        do {
                int             done;
                xfs_fileoff_t   offset_fsb;
                xfs_bmbt_irec_t imap;
                int             nimaps = 1;
                int             error;
                xfs_fsblock_t   firstblock;
                xfs_bmap_free_t flist;

                if (!buffer_delay(bh))
                        goto next_buffer;

                offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);

                /*
                 * Map the range first and check that it is a delalloc extent
                 * before trying to unmap the range.  Otherwise we will be
                 * trying to remove a real extent (which requires a
                 * transaction) or a hole, which is probably a bad idea...
                 */
                error = xfs_bmapi(NULL, ip, offset_fsb, 1,
                                XFS_BMAPI_ENTIRE, NULL, 0, &imap,
                                &nimaps, NULL);

                if (error) {
                        /* something screwed, just bail */
                        if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                                xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
                        "page discard failed delalloc mapping lookup.");
                        }
                        break;
                }
                if (!nimaps) {
                        /* nothing there */
                        goto next_buffer;
                }
                if (imap.br_startblock != DELAYSTARTBLOCK) {
                        /* been converted, ignore */
                        goto next_buffer;
                }
                WARN_ON(imap.br_blockcount == 0);

                /*
                 * Note: while we initialise the firstblock/flist pair, they
                 * should never be used because blocks should never be
                 * allocated or freed for a delalloc extent and hence we
                 * don't need to cancel or finish them after the
                 * xfs_bunmapi() call.
                 */
                xfs_bmap_init(&flist, &firstblock);
                error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock,
                                        &flist, &done);

                ASSERT(!flist.xbf_count && !flist.xbf_first);
                if (error) {
                        /* something screwed, just bail */
                        if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                                xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
                        "page discard unable to remove delalloc mapping.");
                        }
                        break;
                }
next_buffer:
                offset += len;

        } while ((bh = bh->b_this_page) != head);

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
        xfs_vm_invalidatepage(page, 0);
        return;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush the page, we
 * have to check the process flags first: if we are already in a transaction
 * or disk I/O during allocations is off, we need to fail the writepage and
 * redirty the page.
 */
STATIC int
xfs_vm_writepage(
        struct page             *page,
        struct writeback_control *wbc)
{
        struct inode            *inode = page->mapping->host;
        int                     delalloc, unwritten;
        struct buffer_head      *bh, *head;
        struct xfs_bmbt_irec    imap;
        xfs_ioend_t             *ioend = NULL, *iohead = NULL;
        loff_t                  offset;
        unsigned int            type;
        __uint64_t              end_offset;
        pgoff_t                 end_index, last_index;
        ssize_t                 size, len;
        int                     flags, err, imap_valid = 0, uptodate = 1;
        int                     count = 0;
        int                     all_bh = 0;

        trace_xfs_writepage(inode, page, 0);

        ASSERT(page_has_buffers(page));

        /*
         * Refuse to write the page out if we are called from reclaim context.
         *
         * This avoids stack overflows when called from deeply used stacks in
         * random callers for direct reclaim or memcg reclaim.  We explicitly
         * allow reclaim from kswapd as the stack usage there is relatively low.
         *
         * This should really be done by the core VM, but until that happens
         * filesystems like XFS, btrfs and ext4 have to take care of this
         * by themselves.
         */
        if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC)
                goto out_fail;

        /*
         * We need a transaction if there are delalloc or unwritten buffers
         * on the page.
         *
         * If we need a transaction and the process flags say we are already
         * in a transaction, or no IO is allowed then mark the page dirty
         * again and leave the page as is.
         */
        xfs_count_page_state(page, &delalloc, &unwritten);
        if ((current->flags & PF_FSTRANS) && (delalloc || unwritten))
                goto out_fail;

        /* Is this page beyond the end of the file? */
        offset = i_size_read(inode);
        end_index = offset >> PAGE_CACHE_SHIFT;
        last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
        if (page->index >= end_index) {
                if ((page->index >= end_index + 1) ||
                    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
                        unlock_page(page);
                        return 0;
                }
        }

        end_offset = min_t(unsigned long long,
                        (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
                        offset);
        len = 1 << inode->i_blkbits;

        bh = head = page_buffers(page);
        offset = page_offset(page);
        flags = BMAPI_READ;
        type = IO_NEW;

        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;

                /*
                 * A hole may still be marked uptodate because discard_buffer
                 * leaves the flag set.
                 */
                if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
                        ASSERT(!buffer_dirty(bh));
                        imap_valid = 0;
                        continue;
                }

                if (imap_valid)
                        imap_valid = xfs_imap_valid(inode, &imap, offset);

                if (buffer_unwritten(bh) || buffer_delay(bh)) {
                        int new_ioend = 0;

                        /*
                         * Make sure we don't use a read-only iomap
                         */
                        if (flags == BMAPI_READ)
                                imap_valid = 0;

                        if (buffer_unwritten(bh)) {
                                type = IO_UNWRITTEN;
                                flags = BMAPI_WRITE | BMAPI_IGNSTATE;
                        } else if (buffer_delay(bh)) {
                                type = IO_DELAY;
                                flags = BMAPI_ALLOCATE;

                                if (wbc->sync_mode == WB_SYNC_NONE &&
                                    wbc->nonblocking)
                                        flags |= BMAPI_TRYLOCK;
                        }

                        if (!imap_valid) {
                                /*
                                 * If we didn't have a valid mapping then we
                                 * need to ensure that we put the new mapping
                                 * in a new ioend structure.  This needs to be
                                 * done to ensure that the ioends correctly
                                 * reflect the block mappings at io completion
                                 * for unwritten extent conversion.
                                 */
                                new_ioend = 1;
                                err = xfs_map_blocks(inode, offset, len,
                                                &imap, flags);
                                if (err)
                                        goto error;
                                imap_valid = xfs_imap_valid(inode, &imap,
                                                            offset);
                        }
                        if (imap_valid) {
                                xfs_map_at_offset(inode, bh, &imap, offset);
                                xfs_add_to_ioend(inode, bh, offset, type,
                                                 &ioend, new_ioend);
                                count++;
                        }
                } else if (buffer_uptodate(bh)) {
                        /*
                         * we got here because the buffer is already mapped.
                         * That means it must already have extents allocated
                         * underneath it.  Map the extent by reading it.
                         */
                        if (!imap_valid || flags != BMAPI_READ) {
                                flags = BMAPI_READ;
                                size = xfs_probe_cluster(inode, page, bh, head);
                                err = xfs_map_blocks(inode, offset, size,
                                                &imap, flags);
                                if (err)
                                        goto error;
                                imap_valid = xfs_imap_valid(inode, &imap,
                                                            offset);
                        }

                        /*
                         * We set the type to IO_NEW in case we are doing a
                         * small write at EOF that is extending the file but
                         * without needing an allocation.  We need to update
                         * the file size on I/O completion in this case so it
                         * is the same case as having just allocated a new
                         * extent that we are writing into for the first time.
                         */
                        type = IO_NEW;
                        if (trylock_buffer(bh)) {
                                if (imap_valid)
                                        all_bh = 1;
                                xfs_add_to_ioend(inode, bh, offset, type,
                                                &ioend, !imap_valid);
                                count++;
                        } else {
                                imap_valid = 0;
                        }
                } else if (PageUptodate(page)) {
                        ASSERT(buffer_mapped(bh));
                        imap_valid = 0;
                }

                if (!iohead)
                        iohead = ioend;

        } while (offset += len, ((bh = bh->b_this_page) != head));

        if (uptodate && bh == head)
                SetPageUptodate(page);

        xfs_start_page_writeback(page, 1, count);

        if (ioend && imap_valid) {
                xfs_off_t               end_index;

                end_index = imap.br_startoff + imap.br_blockcount;

                /* to bytes */
                end_index <<= inode->i_blkbits;

                /* to pages */
                end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

                /* check against file size */
                if (end_index > last_index)
                        end_index = last_index;

                xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
                                  wbc, all_bh, end_index);
        }

        if (iohead)
                xfs_submit_ioend(wbc, iohead);

        return 0;

error:
        if (iohead)
                xfs_cancel_ioend(iohead);

        if (err == -EAGAIN)
                goto out_fail;

        xfs_aops_discard_page(page);
        ClearPageUptodate(page);
        unlock_page(page);
        return err;

out_fail:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
}

STATIC int
xfs_vm_writepages(
        struct address_space    *mapping,
        struct writeback_control *wbc)
{
        xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
        return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released.  The page should already be clean.  We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
        struct page             *page,
        gfp_t                   gfp_mask)
{
        int                     delalloc, unwritten;

        trace_xfs_releasepage(page->mapping->host, page, 0);

        xfs_count_page_state(page, &delalloc, &unwritten);

        if (WARN_ON(delalloc))
                return 0;
        if (WARN_ON(unwritten))
                return 0;

        return try_to_free_buffers(page);
}

STATIC int
__xfs_get_blocks(
        struct inode            *inode,
        sector_t                iblock,
        struct buffer_head      *bh_result,
        int                     create,
        int                     direct)
{
        int                     flags = create ? BMAPI_WRITE : BMAPI_READ;
        struct xfs_bmbt_irec    imap;
        xfs_off_t               offset;
        ssize_t                 size;
        int                     nimap = 1;
        int                     new = 0;
        int                     error;

        offset = (xfs_off_t)iblock << inode->i_blkbits;
        ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
        size = bh_result->b_size;

        if (!create && direct && offset >= i_size_read(inode))
                return 0;

        if (direct && create)
                flags |= BMAPI_DIRECT;

        error = xfs_iomap(XFS_I(inode), offset, size, flags, &imap, &nimap,
                          &new);
        if (error)
                return -error;
        if (nimap == 0)
                return 0;

        if (imap.br_startblock != HOLESTARTBLOCK &&
            imap.br_startblock != DELAYSTARTBLOCK) {
                /*
                 * For unwritten extents do not report a disk address on
                 * the read case (treat as if we're reading into a hole).
                 */
                if (create || !ISUNWRITTEN(&imap))
                        xfs_map_buffer(inode, bh_result, &imap, offset);
                if (create && ISUNWRITTEN(&imap)) {
                        if (direct)
                                bh_result->b_private = inode;
                        set_buffer_unwritten(bh_result);
                }
        }

        /*
         * If this is a realtime file, data may be on a different device
         * to that pointed to from the buffer_head b_bdev currently.
         */
        bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

        /*
         * If we previously allocated a block out beyond eof and we are now
         * coming back to use it then we will need to flag it as new even if it
         * has a disk address.
         *
         * With sub-block writes into unwritten extents we also need to mark
         * the buffer as new so that the unwritten parts of the buffer get
         * zeroed correctly.
         */
        if (create &&
            ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
             (offset >= i_size_read(inode)) ||
             (new || ISUNWRITTEN(&imap))))
                set_buffer_new(bh_result);

        if (imap.br_startblock == DELAYSTARTBLOCK) {
                BUG_ON(direct);
                if (create) {
                        set_buffer_uptodate(bh_result);
                        set_buffer_mapped(bh_result);
                        set_buffer_delay(bh_result);
                }
        }

        /*
         * If this is O_DIRECT or the mpage code calling tell them how large
         * the mapping is, so that we can avoid repeated get_blocks calls.
         */
        if (direct || size > (1 << inode->i_blkbits)) {
                xfs_off_t               mapping_size;

                mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
                mapping_size <<= inode->i_blkbits;

                ASSERT(mapping_size > 0);
                if (mapping_size > size)
                        mapping_size = size;
                if (mapping_size > LONG_MAX)
                        mapping_size = LONG_MAX;

                bh_result->b_size = mapping_size;
        }

        return 0;
}
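
/*
 * Example of the mapping_size clamp above: a 64k request that starts in
 * the middle of a 1MB extent reports b_size = 64k (clamped to the request
 * size), while a 1MB request starting 16k before the end of an extent
 * reports only 16k, so callers never build an I/O that spans an extent
 * boundary.
 */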

int
xfs_get_blocks(
        struct inode            *inode,
        sector_t                iblock,
        struct buffer_head      *bh_result,
        int                     create)
{
        return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
}

STATIC int
xfs_get_blocks_direct(
        struct inode            *inode,
        sector_t                iblock,
        struct buffer_head      *bh_result,
        int                     create)
{
        return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
}

/*
 * Complete a direct I/O write request.
 *
 * If the private argument is non-NULL __xfs_get_blocks signals us that we
 * need to issue a transaction to convert the range from unwritten to written
 * extents.  In case this is regular synchronous I/O we just call xfs_end_io
 * to do this and we are done.  But in case this was a successful AIO
 * request this handler is called from interrupt context, from which we
 * can't start transactions.  In that case offload the I/O completion to
 * the workqueues we also use for buffered I/O completion.
 */
STATIC void
xfs_end_io_direct_write(
        struct kiocb            *iocb,
        loff_t                  offset,
        ssize_t                 size,
        void                    *private,
        int                     ret,
        bool                    is_async)
{
        struct xfs_ioend        *ioend = iocb->private;

        /*
         * blockdev_direct_IO can return an error even after the I/O
         * completion handler was called.  Thus we need to protect
         * against double-freeing.
         */
        iocb->private = NULL;

        ioend->io_offset = offset;
        ioend->io_size = size;
        if (private && size > 0)
                ioend->io_type = IO_UNWRITTEN;

        if (is_async) {
                /*
                 * If we are converting an unwritten extent we need to delay
                 * the AIO completion until after the unwritten extent
                 * conversion has completed, otherwise do it ASAP.
                 */
                if (ioend->io_type == IO_UNWRITTEN) {
                        ioend->io_iocb = iocb;
                        ioend->io_result = ret;
                } else {
                        aio_complete(iocb, ret, 0);
                }
                xfs_finish_ioend(ioend);
        } else {
                xfs_finish_ioend_sync(ioend);
        }
}

STATIC ssize_t
xfs_vm_direct_IO(
        int                     rw,
        struct kiocb            *iocb,
        const struct iovec      *iov,
        loff_t                  offset,
        unsigned long           nr_segs)
{
        struct inode            *inode = iocb->ki_filp->f_mapping->host;
        struct block_device     *bdev = xfs_find_bdev_for_inode(inode);
        ssize_t                 ret;

        if (rw & WRITE) {
                iocb->private = xfs_alloc_ioend(inode, IO_NEW);

                ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
                                            offset, nr_segs,
                                            xfs_get_blocks_direct,
                                            xfs_end_io_direct_write, NULL, 0);
                if (ret != -EIOCBQUEUED && iocb->private)
                        xfs_destroy_ioend(iocb->private);
        } else {
                ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
                                            offset, nr_segs,
                                            xfs_get_blocks_direct,
                                            NULL, NULL, 0);
        }

        return ret;
}
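
/*
 * The ioend allocated for a direct write doubles as the completion cookie
 * in iocb->private.  If __blockdev_direct_IO() fails before the I/O is
 * queued it never invokes xfs_end_io_direct_write() (which clears
 * iocb->private), so a leftover non-NULL private pointer means the ioend
 * has to be destroyed by hand above.
 */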

STATIC void
xfs_vm_write_failed(
        struct address_space    *mapping,
        loff_t                  to)
{
        struct inode            *inode = mapping->host;

        if (to > inode->i_size) {
                struct iattr    ia = {
                        .ia_valid       = ATTR_SIZE | ATTR_FORCE,
                        .ia_size        = inode->i_size,
                };
                xfs_setattr(XFS_I(inode), &ia, XFS_ATTR_NOLOCK);
        }
}

STATIC int
xfs_vm_write_begin(
        struct file             *file,
        struct address_space    *mapping,
        loff_t                  pos,
        unsigned                len,
        unsigned                flags,
        struct page             **pagep,
        void                    **fsdata)
{
        int                     ret;

        ret = block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS,
                                pagep, xfs_get_blocks);
        if (unlikely(ret))
                xfs_vm_write_failed(mapping, pos + len);
        return ret;
}

STATIC int
xfs_vm_write_end(
        struct file             *file,
        struct address_space    *mapping,
        loff_t                  pos,
        unsigned                len,
        unsigned                copied,
        struct page             *page,
        void                    *fsdata)
{
        int                     ret;

        ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
        if (unlikely(ret < len))
                xfs_vm_write_failed(mapping, pos + len);
        return ret;
}

STATIC sector_t
xfs_vm_bmap(
        struct address_space    *mapping,
        sector_t                block)
{
        struct inode            *inode = (struct inode *)mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);

        trace_xfs_vm_bmap(XFS_I(inode));
        xfs_ilock(ip, XFS_IOLOCK_SHARED);
        xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
        return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
        struct file             *unused,
        struct page             *page)
{
        return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
        struct file             *unused,
        struct address_space    *mapping,
        struct list_head        *pages,
        unsigned                nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

const struct address_space_operations xfs_address_space_operations = {
        .readpage               = xfs_vm_readpage,
        .readpages              = xfs_vm_readpages,
        .writepage              = xfs_vm_writepage,
        .writepages             = xfs_vm_writepages,
        .sync_page              = block_sync_page,
        .releasepage            = xfs_vm_releasepage,
        .invalidatepage         = xfs_vm_invalidatepage,
        .write_begin            = xfs_vm_write_begin,
        .write_end              = xfs_vm_write_end,
        .bmap                   = xfs_vm_bmap,
        .direct_IO              = xfs_vm_direct_IO,
        .migratepage            = buffer_migrate_page,
        .is_partially_uptodate  = block_is_partially_uptodate,
        .error_remove_page      = generic_error_remove_page,
};