/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

STATIC void
xfs_count_page_state(
        struct page             *page,
        int                     *delalloc,
        int                     *unmapped,
        int                     *unwritten)
{
        struct buffer_head      *bh, *head;

        *delalloc = *unmapped = *unwritten = 0;

        bh = head = page_buffers(page);
        do {
                if (buffer_uptodate(bh) && !buffer_mapped(bh))
                        (*unmapped) = 1;
                else if (buffer_unwritten(bh) && !buffer_delay(bh))
                        clear_buffer_unwritten(bh);
                else if (buffer_unwritten(bh))
                        (*unwritten) = 1;
                else if (buffer_delay(bh))
                        (*delalloc) = 1;
        } while ((bh = bh->b_this_page) != head);
}

#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
        int             tag,
        struct inode    *inode,
        struct page     *page,
        unsigned long   pgoff)
{
        xfs_inode_t     *ip;
        bhv_vnode_t     *vp = vn_from_inode(inode);
        loff_t          isize = i_size_read(inode);
        loff_t          offset = page_offset(page);
        int             delalloc = -1, unmapped = -1, unwritten = -1;

        if (page_has_buffers(page))
                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

        ip = xfs_vtoi(vp);
        if (!ip->i_rwtrace)
                return;

        ktrace_enter(ip->i_rwtrace,
                (void *)((unsigned long)tag),
                (void *)ip,
                (void *)inode,
                (void *)page,
                (void *)pgoff,
                (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
                (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
                (void *)((unsigned long)(isize & 0xffffffff)),
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)delalloc),
                (void *)((unsigned long)unmapped),
                (void *)((unsigned long)unwritten),
                (void *)((unsigned long)current_pid()),
                (void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, pgoff)
#endif

/*
 * Schedule IO completion handling on an xfsdatad thread if this was
 * the final hold on this ioend.
 */
STATIC void
xfs_finish_ioend(
        xfs_ioend_t             *ioend)
{
        if (atomic_dec_and_test(&ioend->io_remaining))
                queue_work(xfsdatad_workqueue, &ioend->io_work);
}
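
/*
 * Note on reference counting: io_remaining starts at 1 in
 * xfs_alloc_ioend() and is incremented once per bio in flight by
 * xfs_submit_ioend_bio().  Each bio completion drops a reference, as
 * does the xfs_finish_ioend() call at the end of submission, so the
 * completion work can only be queued once all I/O for the ioend has
 * been issued and has completed.
 */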

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
        xfs_ioend_t             *ioend)
{
        struct buffer_head      *bh, *next;

        for (bh = ioend->io_buffer_head; bh; bh = next) {
                next = bh->b_private;
                bh->b_end_io(bh, !ioend->io_error);
        }
        if (unlikely(ioend->io_error))
                vn_ioerror(ioend->io_vnode, ioend->io_error, __FILE__, __LINE__);
        vn_iowake(ioend->io_vnode);
        mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Buffered IO write completion for delayed allocate extents.
 * TODO: Update ondisk isize now that we know the file data
 * has been flushed (i.e. the notorious "NULL file" problem).
 */
STATIC void
xfs_end_bio_delalloc(
        struct work_struct      *work)
{
        xfs_ioend_t             *ioend =
                container_of(work, xfs_ioend_t, io_work);

        xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
        struct work_struct      *work)
{
        xfs_ioend_t             *ioend =
                container_of(work, xfs_ioend_t, io_work);

        xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
        struct work_struct      *work)
{
        xfs_ioend_t             *ioend =
                container_of(work, xfs_ioend_t, io_work);
        bhv_vnode_t             *vp = ioend->io_vnode;
        xfs_off_t               offset = ioend->io_offset;
        size_t                  size = ioend->io_size;

        if (likely(!ioend->io_error))
                bhv_vop_bmap(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL);
        xfs_destroy_ioend(ioend);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
        struct inode            *inode,
        unsigned int            type)
{
        xfs_ioend_t             *ioend;

        ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

        /*
         * Set the count to 1 initially, so that an I/O completion
         * callback that fires before we have submitted all the I/O
         * cannot call the completion routine too early.
         */
        atomic_set(&ioend->io_remaining, 1);
        ioend->io_error = 0;
        ioend->io_list = NULL;
        ioend->io_type = type;
        ioend->io_vnode = vn_from_inode(inode);
        ioend->io_buffer_head = NULL;
        ioend->io_buffer_tail = NULL;
        atomic_inc(&ioend->io_vnode->v_iocount);
        ioend->io_offset = 0;
        ioend->io_size = 0;

        if (type == IOMAP_UNWRITTEN)
                INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
        else if (type == IOMAP_DELAY)
                INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
        else
                INIT_WORK(&ioend->io_work, xfs_end_bio_written);

        return ioend;
}
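
/*
 * The initial reference taken above is the one dropped by the
 * xfs_finish_ioend() call at the end of xfs_submit_ioend();
 * completion processing therefore cannot start while bios for this
 * ioend are still being built and submitted.
 */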

STATIC int
xfs_map_blocks(
        struct inode            *inode,
        loff_t                  offset,
        ssize_t                 count,
        xfs_iomap_t             *mapp,
        int                     flags)
{
        bhv_vnode_t             *vp = vn_from_inode(inode);
        int                     error, nmaps = 1;

        error = bhv_vop_bmap(vp, offset, count, flags, mapp, &nmaps);
        if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
                VMODIFY(vp);
        return -error;
}

STATIC inline int
xfs_iomap_valid(
        xfs_iomap_t             *iomapp,
        loff_t                  offset)
{
        return offset >= iomapp->iomap_offset &&
                offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}
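
/*
 * The range check is half-open: for example, a mapping with
 * iomap_offset 0 and iomap_bsize 4096 is valid for offsets 0 through
 * 4095, while offset 4096 requires a fresh mapping.
 */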

/*
 * BIO completion handler for buffered IO.
 */
STATIC int
xfs_end_bio(
        struct bio              *bio,
        unsigned int            bytes_done,
        int                     error)
{
        xfs_ioend_t             *ioend = bio->bi_private;

        if (bio->bi_size)
                return 1;

        ASSERT(atomic_read(&bio->bi_cnt) >= 1);
        ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

        /* Toss bio and pass work off to an xfsdatad thread */
        bio->bi_private = NULL;
        bio->bi_end_io = NULL;
        bio_put(bio);

        xfs_finish_ioend(ioend);
        return 0;
}
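
/*
 * xfs_end_bio() is called from bio completion (interrupt) context,
 * so it only records the error and drops a reference; the real
 * completion work runs later on an xfsdatad thread via the ioend
 * work item.
 */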

STATIC void
xfs_submit_ioend_bio(
        xfs_ioend_t     *ioend,
        struct bio      *bio)
{
        atomic_inc(&ioend->io_remaining);

        bio->bi_private = ioend;
        bio->bi_end_io = xfs_end_bio;

        submit_bio(WRITE, bio);
        ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
        bio_put(bio);
}

STATIC struct bio *
xfs_alloc_ioend_bio(
        struct buffer_head      *bh)
{
        struct bio              *bio;
        int                     nvecs = bio_get_nr_vecs(bh->b_bdev);

        do {
                bio = bio_alloc(GFP_NOIO, nvecs);
                nvecs >>= 1;
        } while (!bio);

        ASSERT(bio->bi_private == NULL);
        bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        bio_get(bio);
        return bio;
}
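
/*
 * The bio_get() above pairs with the bio_put() in
 * xfs_submit_ioend_bio(): the extra reference keeps the bio alive
 * across submit_bio() so that the BIO_EOPNOTSUPP assertion can still
 * inspect its flags even if the I/O completes immediately.
 */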

STATIC void
xfs_start_buffer_writeback(
        struct buffer_head      *bh)
{
        ASSERT(buffer_mapped(bh));
        ASSERT(buffer_locked(bh));
        ASSERT(!buffer_delay(bh));
        ASSERT(!buffer_unwritten(bh));

        mark_buffer_async_write(bh);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
        struct page             *page,
        struct writeback_control *wbc,
        int                     clear_dirty,
        int                     buffers)
{
        ASSERT(PageLocked(page));
        ASSERT(!PageWriteback(page));
        if (clear_dirty)
                clear_page_dirty_for_io(page);
        set_page_writeback(page);
        unlock_page(page);
        if (!buffers) {
                end_page_writeback(page);
                wbc->pages_skipped++;   /* We didn't write this page */
        }
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
        return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}
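
/*
 * bio_add_page() returns the number of bytes actually added, so a
 * return value shorter than bh->b_size means the bio is full; the
 * caller then submits the current bio and retries with a fresh one.
 */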

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we go, we can end up with a page that only has some of its
 * buffers marked async write, and I/O completion on those can occur before
 * we have marked the remaining buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
        xfs_ioend_t             *ioend)
{
        xfs_ioend_t             *head = ioend;
        xfs_ioend_t             *next;
        struct buffer_head      *bh;
        struct bio              *bio;
        sector_t                lastblock = 0;

        /* Pass 1 - start writeback */
        do {
                next = ioend->io_list;
                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
                        xfs_start_buffer_writeback(bh);
                }
        } while ((ioend = next) != NULL);

        /* Pass 2 - submit I/O */
        ioend = head;
        do {
                next = ioend->io_list;
                bio = NULL;

                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

                        if (!bio) {
 retry:
                                bio = xfs_alloc_ioend_bio(bh);
                        } else if (bh->b_blocknr != lastblock + 1) {
                                xfs_submit_ioend_bio(ioend, bio);
                                goto retry;
                        }

                        if (bio_add_buffer(bio, bh) != bh->b_size) {
                                xfs_submit_ioend_bio(ioend, bio);
                                goto retry;
                        }

                        lastblock = bh->b_blocknr;
                }
                if (bio)
                        xfs_submit_ioend_bio(ioend, bio);
                xfs_finish_ioend(ioend);
        } while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
        xfs_ioend_t             *ioend)
{
        xfs_ioend_t             *next;
        struct buffer_head      *bh, *next_bh;

        do {
                next = ioend->io_list;
                bh = ioend->io_buffer_head;
                do {
                        next_bh = bh->b_private;
                        clear_buffer_async_write(bh);
                        unlock_buffer(bh);
                } while ((bh = next_bh) != NULL);

                vn_iowake(ioend->io_vnode);
                mempool_free(ioend, xfs_ioend_pool);
        } while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
        struct inode            *inode,
        struct buffer_head      *bh,
        xfs_off_t               offset,
        unsigned int            type,
        xfs_ioend_t             **result,
        int                     need_ioend)
{
        xfs_ioend_t             *ioend = *result;

        if (!ioend || need_ioend || type != ioend->io_type) {
                xfs_ioend_t     *previous = *result;

                ioend = xfs_alloc_ioend(inode, type);
                ioend->io_offset = offset;
                ioend->io_buffer_head = bh;
                ioend->io_buffer_tail = bh;
                if (previous)
                        previous->io_list = ioend;
                *result = ioend;
        } else {
                ioend->io_buffer_tail->b_private = bh;
                ioend->io_buffer_tail = bh;
        }

        bh->b_private = NULL;
        ioend->io_size += bh->b_size;
}
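
/*
 * Buffers owned by an ioend are chained into a singly linked list
 * through b_private (cleared above as each new tail is appended),
 * while the ioends themselves are chained through io_list; both
 * lists are walked again in xfs_submit_ioend().
 */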

STATIC void
xfs_map_buffer(
        struct buffer_head      *bh,
        xfs_iomap_t             *mp,
        xfs_off_t               offset,
        uint                    block_bits)
{
        sector_t                bn;

        ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);

        bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
              ((offset - mp->iomap_offset) >> block_bits);

        ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));

        bh->b_blocknr = bn;
        set_buffer_mapped(bh);
}
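
/*
 * iomap_bn is expressed in 512-byte basic blocks (BBSHIFT == 9), so
 * shifting right by (block_bits - BBSHIFT) converts it to a
 * filesystem block number, and the second term adds the buffer's
 * block offset within the mapping.  For example, with 4k blocks
 * (block_bits == 12), iomap_bn 80 maps to filesystem block 10, and a
 * buffer 8192 bytes into the mapping lands on block 12.
 */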

STATIC void
xfs_map_at_offset(
        struct buffer_head      *bh,
        loff_t                  offset,
        int                     block_bits,
        xfs_iomap_t             *iomapp)
{
        ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
        ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));

        lock_buffer(bh);
        xfs_map_buffer(bh, iomapp, offset, block_bits);
        bh->b_bdev = iomapp->iomap_target->bt_bdev;
        set_buffer_mapped(bh);
        clear_buffer_delay(bh);
        clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
        struct page             *page,
        unsigned int            pg_offset,
        int                     mapped)
{
        int                     ret = 0;

        if (PageWriteback(page))
                return 0;

        if (page->mapping && PageDirty(page)) {
                if (page_has_buffers(page)) {
                        struct buffer_head      *bh, *head;

                        bh = head = page_buffers(page);
                        do {
                                if (!buffer_uptodate(bh))
                                        break;
                                if (mapped != buffer_mapped(bh))
                                        break;
                                ret += bh->b_size;
                                if (ret >= pg_offset)
                                        break;
                        } while ((bh = bh->b_this_page) != head);
                } else
                        ret = mapped ? 0 : PAGE_CACHE_SIZE;
        }

        return ret;
}

STATIC size_t
xfs_probe_cluster(
        struct inode            *inode,
        struct page             *startpage,
        struct buffer_head      *bh,
        struct buffer_head      *head,
        int                     mapped)
{
        struct pagevec          pvec;
        pgoff_t                 tindex, tlast, tloff;
        size_t                  total = 0;
        int                     done = 0, i;

        /* First sum forwards in this page */
        do {
                if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
                        return total;
                total += bh->b_size;
        } while ((bh = bh->b_this_page) != head);

        /* if we reached the end of the page, sum forwards in following pages */
        tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
        tindex = startpage->index + 1;

        /* Prune this back to avoid pathological behavior */
        tloff = min(tlast, startpage->index + 64);

        pagevec_init(&pvec, 0);
        while (!done && tindex <= tloff) {
                unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

                if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
                        break;

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        size_t pg_offset, len = 0;

                        if (tindex == tlast) {
                                pg_offset =
                                    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
                                if (!pg_offset) {
                                        done = 1;
                                        break;
                                }
                        } else
                                pg_offset = PAGE_CACHE_SIZE;

                        if (page->index == tindex && !TestSetPageLocked(page)) {
                                len = xfs_probe_page(page, pg_offset, mapped);
                                unlock_page(page);
                        }

                        if (!len) {
                                done = 1;
                                break;
                        }

                        total += len;
                        tindex++;
                }

                pagevec_release(&pvec);
                cond_resched();
        }

        return total;
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
        struct page             *page,
        unsigned int            type)
{
        if (PageWriteback(page))
                return 0;

        if (page->mapping && page_has_buffers(page)) {
                struct buffer_head      *bh, *head;
                int                     acceptable = 0;

                bh = head = page_buffers(page);
                do {
                        if (buffer_unwritten(bh))
                                acceptable = (type == IOMAP_UNWRITTEN);
                        else if (buffer_delay(bh))
                                acceptable = (type == IOMAP_DELAY);
                        else if (buffer_dirty(bh) && buffer_mapped(bh))
                                acceptable = (type == 0);
                        else
                                break;
                } while ((bh = bh->b_this_page) != head);

                if (acceptable)
                        return 1;
        }

        return 0;
}
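
/*
 * Note that type 0 (neither IOMAP_UNWRITTEN nor IOMAP_DELAY) matches
 * ordinary dirty, mapped buffers, so the clustering path can extend
 * writeback over already-allocated blocks as well as delalloc and
 * unwritten ones.
 */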

/*
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
        struct inode            *inode,
        struct page             *page,
        loff_t                  tindex,
        xfs_iomap_t             *mp,
        xfs_ioend_t             **ioendp,
        struct writeback_control *wbc,
        int                     startio,
        int                     all_bh)
{
        struct buffer_head      *bh, *head;
        xfs_off_t               end_offset;
        unsigned long           p_offset;
        unsigned int            type;
        int                     bbits = inode->i_blkbits;
        int                     len, page_dirty;
        int                     count = 0, done = 0, uptodate = 1;
        xfs_off_t               offset = page_offset(page);

        if (page->index != tindex)
                goto fail;
        if (TestSetPageLocked(page))
                goto fail;
        if (PageWriteback(page))
                goto fail_unlock_page;
        if (page->mapping != inode->i_mapping)
                goto fail_unlock_page;
        if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
                goto fail_unlock_page;

        /*
         * page_dirty is initially a count of buffers on the page before
         * EOF and is decremented as we move each into a cleanable state.
         *
         * Derivation:
         *
         * End offset is the highest offset that this page should represent.
         * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
         * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
         * hence give us the correct page_dirty count. On any other page,
         * it will be zero and in that case we need page_dirty to be the
         * count of buffers on the page.
         */
        end_offset = min_t(unsigned long long,
                        (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
                        i_size_read(inode));

        len = 1 << inode->i_blkbits;
        p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
                                        PAGE_CACHE_SIZE);
        p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
        page_dirty = p_offset / len;

        bh = head = page_buffers(page);
        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;
                if (!(PageUptodate(page) || buffer_uptodate(bh))) {
                        done = 1;
                        continue;
                }

                if (buffer_unwritten(bh) || buffer_delay(bh)) {
                        if (buffer_unwritten(bh))
                                type = IOMAP_UNWRITTEN;
                        else
                                type = IOMAP_DELAY;

                        if (!xfs_iomap_valid(mp, offset)) {
                                done = 1;
                                continue;
                        }

                        ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
                        ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

                        xfs_map_at_offset(bh, offset, bbits, mp);
                        if (startio) {
                                xfs_add_to_ioend(inode, bh, offset,
                                                type, ioendp, done);
                        } else {
                                set_buffer_dirty(bh);
                                unlock_buffer(bh);
                                mark_buffer_dirty(bh);
                        }
                        page_dirty--;
                        count++;
                } else {
                        type = 0;
                        if (buffer_mapped(bh) && all_bh && startio) {
                                lock_buffer(bh);
                                xfs_add_to_ioend(inode, bh, offset,
                                                type, ioendp, done);
                                count++;
                                page_dirty--;
                        } else {
                                done = 1;
                        }
                }
        } while (offset += len, (bh = bh->b_this_page) != head);

        if (uptodate && bh == head)
                SetPageUptodate(page);

        if (startio) {
                if (count) {
                        struct backing_dev_info *bdi;

                        bdi = inode->i_mapping->backing_dev_info;
                        wbc->nr_to_write--;
                        if (bdi_write_congested(bdi)) {
                                wbc->encountered_congestion = 1;
                                done = 1;
                        } else if (wbc->nr_to_write <= 0) {
                                done = 1;
                        }
                }
                xfs_start_page_writeback(page, wbc, !page_dirty, count);
        }

        return done;
 fail_unlock_page:
        unlock_page(page);
 fail:
        return 1;
}
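
/*
 * The return value feeds the loop in xfs_cluster_write(): a nonzero
 * ("done") result stops further page probing, either because this
 * page could not be converted or because congestion/writeback limits
 * say we should stop early.
 */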

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
        struct inode            *inode,
        pgoff_t                 tindex,
        xfs_iomap_t             *iomapp,
        xfs_ioend_t             **ioendp,
        struct writeback_control *wbc,
        int                     startio,
        int                     all_bh,
        pgoff_t                 tlast)
{
        struct pagevec          pvec;
        int                     done = 0, i;

        pagevec_init(&pvec, 0);
        while (!done && tindex <= tlast) {
                unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

                if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
                        break;

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        done = xfs_convert_page(inode, pvec.pages[i], tindex++,
                                        iomapp, ioendp, wbc, startio, all_bh);
                        if (done)
                                break;
                }

                pagevec_release(&pvec);
                cond_resched();
        }
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_states cannot know if any of the blocks - or which block for
 * that matter - are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mmap writes but only partially set up by block_prepare_write, the
 * bh states will not agree and only the ones set up by BPW/BCW will have
 * valid state; thus the whole page must be written out.
 */
STATIC int
xfs_page_state_convert(
        struct inode    *inode,
        struct page     *page,
        struct writeback_control *wbc,
        int             startio,
        int             unmapped) /* also implies page uptodate */
{
        struct buffer_head      *bh, *head;
        xfs_iomap_t             iomap;
        xfs_ioend_t             *ioend = NULL, *iohead = NULL;
        loff_t                  offset;
        unsigned long           p_offset = 0;
        unsigned int            type;
        __uint64_t              end_offset;
        pgoff_t                 end_index, last_index, tlast;
        ssize_t                 size, len;
        int                     flags, err, iomap_valid = 0, uptodate = 1;
        int                     page_dirty, count = 0;
        int                     trylock = 0;
        int                     all_bh = unmapped;

        if (startio) {
                if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
                        trylock |= BMAPI_TRYLOCK;
        }

        /* Is this page beyond the end of the file? */
        offset = i_size_read(inode);
        end_index = offset >> PAGE_CACHE_SHIFT;
        last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
        if (page->index >= end_index) {
                if ((page->index >= end_index + 1) ||
                    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
                        if (startio)
                                unlock_page(page);
                        return 0;
                }
        }

        /*
         * page_dirty is initially a count of buffers on the page before
         * EOF and is decremented as we move each into a cleanable state.
         *
         * Derivation:
         *
         * End offset is the highest offset that this page should represent.
         * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
         * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
         * hence give us the correct page_dirty count. On any other page,
         * it will be zero and in that case we need page_dirty to be the
         * count of buffers on the page.
         */
        end_offset = min_t(unsigned long long,
                        (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
        len = 1 << inode->i_blkbits;
        p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
                                        PAGE_CACHE_SIZE);
        p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
        page_dirty = p_offset / len;

        bh = head = page_buffers(page);
        offset = page_offset(page);
        flags = -1;
        type = 0;

        /* TODO: cleanup count and page_dirty */

        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;
                if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
                        /*
                         * The iomap is actually still valid, but the ioend
                         * isn't.  Shouldn't happen too often.
                         */
                        iomap_valid = 0;
                        continue;
                }

                if (iomap_valid)
                        iomap_valid = xfs_iomap_valid(&iomap, offset);

                /*
                 * First case, map an unwritten extent and prepare for
                 * extent state conversion transaction on completion.
                 *
                 * Second case, allocate space for a delalloc buffer.
                 * We can return EAGAIN here in the release page case.
                 *
                 * Third case, an unmapped buffer was found, and we are
                 * in a path where we need to write the whole page out.
                 */
                if (buffer_unwritten(bh) || buffer_delay(bh) ||
                    ((buffer_uptodate(bh) || PageUptodate(page)) &&
                     !buffer_mapped(bh) && (unmapped || startio))) {
                        /*
                         * Make sure we don't use a read-only iomap
                         */
                        if (flags == BMAPI_READ)
                                iomap_valid = 0;

                        if (buffer_unwritten(bh)) {
                                type = IOMAP_UNWRITTEN;
                                flags = BMAPI_WRITE | BMAPI_IGNSTATE;
                        } else if (buffer_delay(bh)) {
                                type = IOMAP_DELAY;
                                flags = BMAPI_ALLOCATE | trylock;
                        } else {
                                type = IOMAP_NEW;
                                flags = BMAPI_WRITE | BMAPI_MMAP;
                        }

                        if (!iomap_valid) {
                                if (type == IOMAP_NEW) {
                                        size = xfs_probe_cluster(inode,
                                                        page, bh, head, 0);
                                } else {
                                        size = len;
                                }

                                err = xfs_map_blocks(inode, offset, size,
                                                &iomap, flags);
                                if (err)
                                        goto error;
                                iomap_valid = xfs_iomap_valid(&iomap, offset);
                        }
                        if (iomap_valid) {
                                xfs_map_at_offset(bh, offset,
                                                inode->i_blkbits, &iomap);
                                if (startio) {
                                        xfs_add_to_ioend(inode, bh, offset,
                                                        type, &ioend,
                                                        !iomap_valid);
                                } else {
                                        set_buffer_dirty(bh);
                                        unlock_buffer(bh);
                                        mark_buffer_dirty(bh);
                                }
                                page_dirty--;
                                count++;
                        }
                } else if (buffer_uptodate(bh) && startio) {
                        /*
                         * We got here because the buffer is already mapped.
                         * That means it must already have extents allocated
                         * underneath it. Map the extent by reading it.
                         */
                        if (!iomap_valid || type != 0) {
                                flags = BMAPI_READ;
                                size = xfs_probe_cluster(inode, page, bh,
                                                                head, 1);
                                err = xfs_map_blocks(inode, offset, size,
                                                &iomap, flags);
                                if (err)
                                        goto error;
                                iomap_valid = xfs_iomap_valid(&iomap, offset);
                        }

                        type = 0;
                        if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
                                ASSERT(buffer_mapped(bh));
                                if (iomap_valid)
                                        all_bh = 1;
                                xfs_add_to_ioend(inode, bh, offset, type,
                                                &ioend, !iomap_valid);
                                page_dirty--;
                                count++;
                        } else {
                                iomap_valid = 0;
                        }
                } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
                           (unmapped || startio)) {
                        iomap_valid = 0;
                }

                if (!iohead)
                        iohead = ioend;

        } while (offset += len, ((bh = bh->b_this_page) != head));

        if (uptodate && bh == head)
                SetPageUptodate(page);

        if (startio)
                xfs_start_page_writeback(page, wbc, 1, count);

        if (ioend && iomap_valid) {
                offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
                                        PAGE_CACHE_SHIFT;
                tlast = min_t(pgoff_t, offset, last_index);
                xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
                                        wbc, startio, all_bh, tlast);
        }

        if (iohead)
                xfs_submit_ioend(iohead);

        return page_dirty;

error:
        if (iohead)
                xfs_cancel_ioend(iohead);

        /*
         * If it's delalloc and we have nowhere to put it,
         * throw it away, unless the lower layers told
         * us to try again.
         */
        if (err != -EAGAIN) {
                if (!unmapped)
                        block_invalidatepage(page, 0);
                ClearPageUptodate(page);
        }
        return err;
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first: if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
STATIC int
xfs_vm_writepage(
        struct page             *page,
        struct writeback_control *wbc)
{
        int                     error;
        int                     need_trans;
        int                     delalloc, unmapped, unwritten;
        struct inode            *inode = page->mapping->host;

        xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

        /*
         * We need a transaction if:
         *  1. There are delalloc buffers on the page
         *  2. The page is uptodate and we have unmapped buffers
         *  3. The page is uptodate and we have no buffers
         *  4. There are unwritten buffers on the page
         */
        if (!page_has_buffers(page)) {
                unmapped = 1;
                need_trans = 1;
        } else {
                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
                if (!PageUptodate(page))
                        unmapped = 0;
                need_trans = delalloc + unmapped + unwritten;
        }

        /*
         * If we need a transaction and the process flags say
         * we are already in a transaction, or no IO is allowed,
         * then mark the page dirty again and leave the page
         * as is.
         */
        if (current_test_flags(PF_FSTRANS) && need_trans)
                goto out_fail;

        /*
         * Delay hooking up buffer heads until we have
         * made our go/no-go decision.
         */
        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << inode->i_blkbits, 0);

        /*
         * Convert delayed allocate, unwritten or unmapped space
         * to real space and flush out to disk.
         */
        error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
        if (error == -EAGAIN)
                goto out_fail;
        if (unlikely(error < 0))
                goto out_unlock;

        return 0;

out_fail:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
out_unlock:
        unlock_page(page);
        return error;
}
*mapping
,
1163 struct writeback_control
*wbc
)
1165 struct bhv_vnode
*vp
= vn_from_inode(mapping
->host
);
1169 return generic_writepages(mapping
, wbc
);

/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns 0 if the page is ok to release, 1 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
xfs_vm_releasepage(
        struct page             *page,
        gfp_t                   gfp_mask)
{
        struct inode            *inode = page->mapping->host;
        int                     dirty, delalloc, unmapped, unwritten;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 1,
        };

        xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);

        if (!page_has_buffers(page))
                return 0;

        xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
        if (!delalloc && !unwritten)
                goto free_buffers;

        if (!(gfp_mask & __GFP_FS))
                return 0;

        /* If we are already inside a transaction or the thread cannot
         * do I/O, we cannot release this page.
         */
        if (current_test_flags(PF_FSTRANS))
                return 0;

        /*
         * Convert delalloc space to real space, do not flush the
         * data out to disk, that will be done by the caller.
         * Never need to allocate space here - we will always
         * come back to writepage in that case.
         */
        dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
        if (dirty == 0 && !unwritten)
                goto free_buffers;
        return 0;

free_buffers:
        return try_to_free_buffers(page);
}
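
/*
 * Note that the conversion above runs with startio == 0: delalloc
 * state is converted to real allocations but no I/O is issued here;
 * the on-stack writeback_control exists only to satisfy the
 * interface, and any flushing is left to the caller.
 */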

STATIC int
__xfs_get_blocks(
        struct inode            *inode,
        sector_t                iblock,
        struct buffer_head      *bh_result,
        int                     create,
        int                     direct,
        bmapi_flags_t           flags)
{
        bhv_vnode_t             *vp = vn_from_inode(inode);
        xfs_iomap_t             iomap;
        xfs_off_t               offset;
        ssize_t                 size;
        int                     niomap = 1;
        int                     error;

        offset = (xfs_off_t)iblock << inode->i_blkbits;
        ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
        size = bh_result->b_size;
        error = bhv_vop_bmap(vp, offset, size,
                             create ? flags : BMAPI_READ, &iomap, &niomap);
        if (error)
                return -error;
        if (niomap == 0)
                return 0;

        if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
                /*
                 * For unwritten extents do not report a disk address on
                 * the read case (treat as if we're reading into a hole).
                 */
                if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
                        xfs_map_buffer(bh_result, &iomap, offset,
                                       inode->i_blkbits);
                }
                if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
                        if (direct)
                                bh_result->b_private = inode;
                        set_buffer_unwritten(bh_result);
                        set_buffer_delay(bh_result);
                }
        }

        /*
         * If this is a realtime file, data may be on a different device
         * to the one pointed to from the buffer_head b_bdev currently.
         */
        bh_result->b_bdev = iomap.iomap_target->bt_bdev;

        /*
         * If we previously allocated a block out beyond eof and we are
         * now coming back to use it then we will need to flag it as new
         * even if it has a disk address.
         */
        if (create &&
            ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
             (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW)))
                set_buffer_new(bh_result);

        if (iomap.iomap_flags & IOMAP_DELAY) {
                BUG_ON(direct);
                if (create) {
                        set_buffer_uptodate(bh_result);
                        set_buffer_mapped(bh_result);
                        set_buffer_delay(bh_result);
                }
        }

        if (direct || size > (1 << inode->i_blkbits)) {
                ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
                offset = min_t(xfs_off_t,
                                iomap.iomap_bsize - iomap.iomap_delta, size);
                bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
        }

        return 0;
}
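
/*
 * The final b_size trimming above is what lets direct I/O and other
 * multi-block get_blocks callers map more than one block per call:
 * b_size is reduced to the space remaining in the current mapping so
 * that callers never walk off the end of the extent.
 */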

int
xfs_get_blocks(
        struct inode            *inode,
        sector_t                iblock,
        struct buffer_head      *bh_result,
        int                     create)
{
        return __xfs_get_blocks(inode, iblock,
                                bh_result, create, 0, BMAPI_WRITE);
}

STATIC int
xfs_get_blocks_direct(
        struct inode            *inode,
        sector_t                iblock,
        struct buffer_head      *bh_result,
        int                     create)
{
        return __xfs_get_blocks(inode, iblock,
                                bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

STATIC void
xfs_end_io_direct(
        struct kiocb    *iocb,
        loff_t          offset,
        ssize_t         size,
        void            *private)
{
        xfs_ioend_t     *ioend = iocb->private;

        /*
         * Non-NULL private data means we need to issue a transaction to
         * convert a range from unwritten to written extents.  This needs
         * to happen from process context but aio+dio I/O completion
         * happens from irq context so we need to defer it to a workqueue.
         * This is not necessary for synchronous direct I/O, but we do
         * it anyway to keep the code uniform and simpler.
         *
         * The core direct I/O code might be changed to always call the
         * completion handler in the future, in which case all this can
         * go away.
         */
        if (private && size > 0) {
                ioend->io_offset = offset;
                ioend->io_size = size;
                xfs_finish_ioend(ioend);
        } else {
                xfs_destroy_ioend(ioend);
        }

        /*
         * blockdev_direct_IO can return an error even after the I/O
         * completion handler was called.  Thus we need to protect
         * against double-freeing.
         */
        iocb->private = NULL;
}

STATIC ssize_t
xfs_vm_direct_IO(
        int                     rw,
        struct kiocb            *iocb,
        const struct iovec      *iov,
        loff_t                  offset,
        unsigned long           nr_segs)
{
        struct file     *file = iocb->ki_filp;
        struct inode    *inode = file->f_mapping->host;
        bhv_vnode_t     *vp = vn_from_inode(inode);
        xfs_iomap_t     iomap;
        int             maps = 1;
        int             error;
        ssize_t         ret;

        error = bhv_vop_bmap(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps);
        if (error)
                return -error;

        iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);

        if (rw == WRITE) {
                ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
                        iomap.iomap_target->bt_bdev,
                        iov, offset, nr_segs,
                        xfs_get_blocks_direct,
                        xfs_end_io_direct);
        } else {
                ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
                        iomap.iomap_target->bt_bdev,
                        iov, offset, nr_segs,
                        xfs_get_blocks_direct,
                        xfs_end_io_direct);
        }

        if (unlikely(ret != -EIOCBQUEUED && iocb->private))
                xfs_destroy_ioend(iocb->private);
        return ret;
}

STATIC int
xfs_vm_prepare_write(
        struct file             *file,
        struct page             *page,
        unsigned int            from,
        unsigned int            to)
{
        return block_prepare_write(page, from, to, xfs_get_blocks);
}

STATIC sector_t
xfs_vm_bmap(
        struct address_space    *mapping,
        sector_t                block)
{
        struct inode            *inode = (struct inode *)mapping->host;
        bhv_vnode_t             *vp = vn_from_inode(inode);

        vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
        bhv_vop_rwlock(vp, VRWLOCK_READ);
        bhv_vop_flush_pages(vp, (xfs_off_t)0, -1, 0, FI_REMAPF);
        bhv_vop_rwunlock(vp, VRWLOCK_READ);
        return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
        struct file             *unused,
        struct page             *page)
{
        return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
        struct file             *unused,
        struct address_space    *mapping,
        struct list_head        *pages,
        unsigned                nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

STATIC void
xfs_vm_invalidatepage(
        struct page             *page,
        unsigned long           offset)
{
        xfs_page_trace(XFS_INVALIDPAGE_ENTER,
                        page->mapping->host, page, offset);
        block_invalidatepage(page, offset);
}

const struct address_space_operations xfs_address_space_operations = {
        .readpage               = xfs_vm_readpage,
        .readpages              = xfs_vm_readpages,
        .writepage              = xfs_vm_writepage,
        .writepages             = xfs_vm_writepages,
        .sync_page              = block_sync_page,
        .releasepage            = xfs_vm_releasepage,
        .invalidatepage         = xfs_vm_invalidatepage,
        .prepare_write          = xfs_vm_prepare_write,
        .commit_write           = generic_commit_write,
        .bmap                   = xfs_vm_bmap,
        .direct_IO              = xfs_vm_direct_IO,
        .migratepage            = buffer_migrate_page,
};