[XFS] Block on unwritten extent conversion during synchronous direct I/O.
[deliverable/linux.git] fs/xfs/linux-2.6/xfs_aops.c
1da177e4 1/*
7b718769
NS
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
1da177e4 4 *
7b718769
NS
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
1da177e4
LT
7 * published by the Free Software Foundation.
8 *
7b718769
NS
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
1da177e4 13 *
7b718769
NS
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
1da177e4 17 */
1da177e4 18#include "xfs.h"
a844f451 19#include "xfs_bit.h"
1da177e4 20#include "xfs_log.h"
a844f451 21#include "xfs_inum.h"
1da177e4 22#include "xfs_sb.h"
a844f451 23#include "xfs_ag.h"
1da177e4
LT
24#include "xfs_dir2.h"
25#include "xfs_trans.h"
26#include "xfs_dmapi.h"
27#include "xfs_mount.h"
28#include "xfs_bmap_btree.h"
29#include "xfs_alloc_btree.h"
30#include "xfs_ialloc_btree.h"
1da177e4 31#include "xfs_dir2_sf.h"
a844f451 32#include "xfs_attr_sf.h"
1da177e4
LT
33#include "xfs_dinode.h"
34#include "xfs_inode.h"
a844f451
NS
35#include "xfs_alloc.h"
36#include "xfs_btree.h"
1da177e4
LT
37#include "xfs_error.h"
38#include "xfs_rw.h"
39#include "xfs_iomap.h"
40#include <linux/mpage.h>
10ce4444 41#include <linux/pagevec.h>
1da177e4
LT
42#include <linux/writeback.h>
43
f51623b2
NS
44STATIC void
45xfs_count_page_state(
46 struct page *page,
47 int *delalloc,
48 int *unmapped,
49 int *unwritten)
50{
51 struct buffer_head *bh, *head;
52
53 *delalloc = *unmapped = *unwritten = 0;
54
55 bh = head = page_buffers(page);
56 do {
57 if (buffer_uptodate(bh) && !buffer_mapped(bh))
58 (*unmapped) = 1;
f51623b2
NS
59 else if (buffer_unwritten(bh))
60 (*unwritten) = 1;
61 else if (buffer_delay(bh))
62 (*delalloc) = 1;
63 } while ((bh = bh->b_this_page) != head);
64}
65
1da177e4
LT
66#if defined(XFS_RW_TRACE)
67void
68xfs_page_trace(
69 int tag,
70 struct inode *inode,
71 struct page *page,
ed9d88f7 72 unsigned long pgoff)
1da177e4
LT
73{
74 xfs_inode_t *ip;
67fcaa73 75 bhv_vnode_t *vp = vn_from_inode(inode);
1da177e4 76 loff_t isize = i_size_read(inode);
f6d6d4fc 77 loff_t offset = page_offset(page);
1da177e4
LT
78 int delalloc = -1, unmapped = -1, unwritten = -1;
79
80 if (page_has_buffers(page))
81 xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
82
75e17b3c 83 ip = xfs_vtoi(vp);
1da177e4
LT
84 if (!ip->i_rwtrace)
85 return;
86
87 ktrace_enter(ip->i_rwtrace,
88 (void *)((unsigned long)tag),
89 (void *)ip,
90 (void *)inode,
91 (void *)page,
ed9d88f7 92 (void *)pgoff,
1da177e4
LT
93 (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
94 (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
95 (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
96 (void *)((unsigned long)(isize & 0xffffffff)),
97 (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
98 (void *)((unsigned long)(offset & 0xffffffff)),
99 (void *)((unsigned long)delalloc),
100 (void *)((unsigned long)unmapped),
101 (void *)((unsigned long)unwritten),
f1fdc848 102 (void *)((unsigned long)current_pid()),
1da177e4
LT
103 (void *)NULL);
104}
105#else
ed9d88f7 106#define xfs_page_trace(tag, inode, page, pgoff)
1da177e4
LT
107#endif
108
0829c360
CH
109/*
110 * Schedule IO completion handling on an xfsdatad if this was
e927af90
DC
111 * the final hold on this ioend. If we are asked to wait,
112 * flush the workqueue.
0829c360
CH
113 */
114STATIC void
115xfs_finish_ioend(
e927af90
DC
116 xfs_ioend_t *ioend,
117 int wait)
0829c360 118{
e927af90 119 if (atomic_dec_and_test(&ioend->io_remaining)) {
0829c360 120 queue_work(xfsdatad_workqueue, &ioend->io_work);
e927af90
DC
121 if (wait)
122 flush_workqueue(xfsdatad_workqueue);
123 }
0829c360
CH
124}
125
f6d6d4fc
CH
126/*
127 * We're now finished for good with this ioend structure.
128 * Update the page state via the associated buffer_heads,
129 * release holds on the inode and bio, and finally free
130 * up memory. Do not use the ioend after this.
131 */
0829c360
CH
132STATIC void
133xfs_destroy_ioend(
134 xfs_ioend_t *ioend)
135{
f6d6d4fc
CH
136 struct buffer_head *bh, *next;
137
138 for (bh = ioend->io_buffer_head; bh; bh = next) {
139 next = bh->b_private;
7d04a335 140 bh->b_end_io(bh, !ioend->io_error);
f6d6d4fc 141 }
7d04a335
NS
142 if (unlikely(ioend->io_error))
143 vn_ioerror(ioend->io_vnode, ioend->io_error, __FILE__,__LINE__);
0829c360
CH
144 vn_iowake(ioend->io_vnode);
145 mempool_free(ioend, xfs_ioend_pool);
146}
147
ba87ea69
LM
148/*
149 * Update on-disk file size now that data has been written to disk.
150 * The current in-memory file size is i_size. If a write is beyond
151 * eof, io_new_size will be the intended file size until i_size is
152 * updated. If this write does not extend all the way to the valid
153 * file size then restrict this update to the end of the write.
154 */
155STATIC void
156xfs_setfilesize(
157 xfs_ioend_t *ioend)
158{
159 xfs_inode_t *ip;
160 xfs_fsize_t isize;
161 xfs_fsize_t bsize;
162
163 ip = xfs_vtoi(ioend->io_vnode);
164
165 ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
166 ASSERT(ioend->io_type != IOMAP_READ);
167
168 if (unlikely(ioend->io_error))
169 return;
170
171 bsize = ioend->io_offset + ioend->io_size;
172
173 xfs_ilock(ip, XFS_ILOCK_EXCL);
174
175 isize = MAX(ip->i_size, ip->i_iocore.io_new_size);
176 isize = MIN(isize, bsize);
177
178 if (ip->i_d.di_size < isize) {
179 ip->i_d.di_size = isize;
180 ip->i_update_core = 1;
181 ip->i_update_size = 1;
182 }
183
184 xfs_iunlock(ip, XFS_ILOCK_EXCL);
185}
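
To make the clamp above concrete, here is a small stand-alone sketch of the same arithmetic with made-up sizes (an illustration, not the kernel code): the on-disk size is advanced to min(max(i_size, io_new_size), end of this write), and only if that grows it.

/* Stand-alone model of the xfs_setfilesize() clamp; all values are
 * illustrative, not taken from a real filesystem. */
#include <stdio.h>
#include <stdint.h>

static uint64_t max64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t min64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	uint64_t i_size = 100 * 1024;        /* in-core file size */
	uint64_t io_new_size = 1024 * 1024;  /* intended size of an extending write */
	uint64_t di_size = 100 * 1024;       /* current on-disk size */
	uint64_t io_offset = 0, io_size = 512 * 1024; /* the ioend that just completed */

	uint64_t bsize = io_offset + io_size;              /* end of this write */
	uint64_t isize = min64(max64(i_size, io_new_size), bsize);

	if (di_size < isize)
		di_size = isize;

	/* Only 512 KiB of the intended 1 MiB has hit the disk, so the
	 * on-disk size advances to 512 KiB, not 1 MiB. */
	printf("new on-disk size: %llu\n", (unsigned long long)di_size);
	return 0;
}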
186
0829c360 187/*
f6d6d4fc 188 * Buffered IO write completion for delayed allocate extents.
f6d6d4fc
CH
189 */
190STATIC void
191xfs_end_bio_delalloc(
c4028958 192 struct work_struct *work)
f6d6d4fc 193{
c4028958
DH
194 xfs_ioend_t *ioend =
195 container_of(work, xfs_ioend_t, io_work);
f6d6d4fc 196
ba87ea69 197 xfs_setfilesize(ioend);
f6d6d4fc
CH
198 xfs_destroy_ioend(ioend);
199}
200
201/*
202 * Buffered IO write completion for regular, written extents.
203 */
204STATIC void
205xfs_end_bio_written(
c4028958 206 struct work_struct *work)
f6d6d4fc 207{
c4028958
DH
208 xfs_ioend_t *ioend =
209 container_of(work, xfs_ioend_t, io_work);
f6d6d4fc 210
ba87ea69 211 xfs_setfilesize(ioend);
f6d6d4fc
CH
212 xfs_destroy_ioend(ioend);
213}
214
215/*
216 * IO write completion for unwritten extents.
217 *
0829c360 218 * Issue transactions to convert a buffer range from unwritten
f0973863 219 * to written extents.
0829c360
CH
220 */
221STATIC void
222xfs_end_bio_unwritten(
c4028958 223 struct work_struct *work)
0829c360 224{
c4028958
DH
225 xfs_ioend_t *ioend =
226 container_of(work, xfs_ioend_t, io_work);
67fcaa73 227 bhv_vnode_t *vp = ioend->io_vnode;
0829c360
CH
228 xfs_off_t offset = ioend->io_offset;
229 size_t size = ioend->io_size;
0829c360 230
ba87ea69 231 if (likely(!ioend->io_error)) {
67fcaa73 232 bhv_vop_bmap(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL);
ba87ea69
LM
233 xfs_setfilesize(ioend);
234 }
235 xfs_destroy_ioend(ioend);
236}
237
238/*
239 * IO read completion for regular, written extents.
240 */
241STATIC void
242xfs_end_bio_read(
243 struct work_struct *work)
244{
245 xfs_ioend_t *ioend =
246 container_of(work, xfs_ioend_t, io_work);
247
0829c360
CH
248 xfs_destroy_ioend(ioend);
249}
250
251/*
252 * Allocate and initialise an IO completion structure.
253 * We need to track unwritten extent write completion here initially.
254 * We'll need to extend this for updating the ondisk inode size later
255 * (vs. incore size).
256 */
257STATIC xfs_ioend_t *
258xfs_alloc_ioend(
f6d6d4fc
CH
259 struct inode *inode,
260 unsigned int type)
0829c360
CH
261{
262 xfs_ioend_t *ioend;
263
264 ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
265
266 /*
267 * Set the count to 1 initially, which will prevent an I/O
268 * completion callback from happening before we have started
269 * all the I/O, i.e. from calling the completion routine too early.
270 */
271 atomic_set(&ioend->io_remaining, 1);
7d04a335 272 ioend->io_error = 0;
f6d6d4fc
CH
273 ioend->io_list = NULL;
274 ioend->io_type = type;
ec86dc02 275 ioend->io_vnode = vn_from_inode(inode);
c1a073bd 276 ioend->io_buffer_head = NULL;
f6d6d4fc 277 ioend->io_buffer_tail = NULL;
0829c360
CH
278 atomic_inc(&ioend->io_vnode->v_iocount);
279 ioend->io_offset = 0;
280 ioend->io_size = 0;
281
f6d6d4fc 282 if (type == IOMAP_UNWRITTEN)
c4028958 283 INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
f6d6d4fc 284 else if (type == IOMAP_DELAY)
c4028958 285 INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
ba87ea69
LM
286 else if (type == IOMAP_READ)
287 INIT_WORK(&ioend->io_work, xfs_end_bio_read);
f6d6d4fc 288 else
c4028958 289 INIT_WORK(&ioend->io_work, xfs_end_bio_written);
0829c360
CH
290
291 return ioend;
292}
293
1da177e4
LT
294STATIC int
295xfs_map_blocks(
296 struct inode *inode,
297 loff_t offset,
298 ssize_t count,
299 xfs_iomap_t *mapp,
300 int flags)
301{
67fcaa73 302 bhv_vnode_t *vp = vn_from_inode(inode);
1da177e4
LT
303 int error, nmaps = 1;
304
67fcaa73 305 error = bhv_vop_bmap(vp, offset, count, flags, mapp, &nmaps);
1da177e4
LT
306 if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
307 VMODIFY(vp);
308 return -error;
309}
310
7989cb8e 311STATIC_INLINE int
1defeac9 312xfs_iomap_valid(
1da177e4 313 xfs_iomap_t *iomapp,
1defeac9 314 loff_t offset)
1da177e4 315{
1defeac9
CH
316 return offset >= iomapp->iomap_offset &&
317 offset < iomapp->iomap_offset + iomapp->iomap_bsize;
1da177e4
LT
318}
319
f6d6d4fc
CH
320/*
321 * BIO completion handler for buffered IO.
322 */
323STATIC int
324xfs_end_bio(
325 struct bio *bio,
326 unsigned int bytes_done,
327 int error)
328{
329 xfs_ioend_t *ioend = bio->bi_private;
330
331 if (bio->bi_size)
332 return 1;
333
f6d6d4fc 334 ASSERT(atomic_read(&bio->bi_cnt) >= 1);
7d04a335 335 ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
f6d6d4fc
CH
336
337 /* Toss bio and pass work off to an xfsdatad thread */
f6d6d4fc
CH
338 bio->bi_private = NULL;
339 bio->bi_end_io = NULL;
f6d6d4fc 340 bio_put(bio);
7d04a335 341
e927af90 342 xfs_finish_ioend(ioend, 0);
f6d6d4fc
CH
343 return 0;
344}
345
346STATIC void
347xfs_submit_ioend_bio(
348 xfs_ioend_t *ioend,
349 struct bio *bio)
350{
351 atomic_inc(&ioend->io_remaining);
352
353 bio->bi_private = ioend;
354 bio->bi_end_io = xfs_end_bio;
355
356 submit_bio(WRITE, bio);
357 ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
358 bio_put(bio);
359}
360
361STATIC struct bio *
362xfs_alloc_ioend_bio(
363 struct buffer_head *bh)
364{
365 struct bio *bio;
366 int nvecs = bio_get_nr_vecs(bh->b_bdev);
367
368 do {
369 bio = bio_alloc(GFP_NOIO, nvecs);
370 nvecs >>= 1;
371 } while (!bio);
372
373 ASSERT(bio->bi_private == NULL);
374 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
375 bio->bi_bdev = bh->b_bdev;
376 bio_get(bio);
377 return bio;
378}
379
380STATIC void
381xfs_start_buffer_writeback(
382 struct buffer_head *bh)
383{
384 ASSERT(buffer_mapped(bh));
385 ASSERT(buffer_locked(bh));
386 ASSERT(!buffer_delay(bh));
387 ASSERT(!buffer_unwritten(bh));
388
389 mark_buffer_async_write(bh);
390 set_buffer_uptodate(bh);
391 clear_buffer_dirty(bh);
392}
393
394STATIC void
395xfs_start_page_writeback(
396 struct page *page,
397 struct writeback_control *wbc,
398 int clear_dirty,
399 int buffers)
400{
401 ASSERT(PageLocked(page));
402 ASSERT(!PageWriteback(page));
f6d6d4fc 403 if (clear_dirty)
92132021
DC
404 clear_page_dirty_for_io(page);
405 set_page_writeback(page);
f6d6d4fc
CH
406 unlock_page(page);
407 if (!buffers) {
408 end_page_writeback(page);
409 wbc->pages_skipped++; /* We didn't write this page */
410 }
411}
412
413static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
414{
415 return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
416}
417
418/*
d88992f6
DC
419 * Submit all of the bios for all of the ioends we have saved up, covering the
420 * initial writepage page and also any probed pages.
421 *
422 * Because we may have multiple ioends spanning a page, we need to start
423 * writeback on all the buffers before we submit them for I/O. If we mark the
424 * buffers as we go, then we can end up with a page that only has some
425 * buffers marked async write, and I/O completion can occur before we
426 * mark the other buffers async write.
427 *
428 * The end result of this is that we trip a bug in end_page_writeback() because
429 * we call it twice for the one page as the code in end_buffer_async_write()
430 * assumes that all buffers on the page are started at the same time.
431 *
432 * The fix is two passes across the ioend list - one to start writeback on the
c41564b5 433 * buffer_heads, and then submit them for I/O on the second pass.
f6d6d4fc
CH
434 */
435STATIC void
436xfs_submit_ioend(
437 xfs_ioend_t *ioend)
438{
d88992f6 439 xfs_ioend_t *head = ioend;
f6d6d4fc
CH
440 xfs_ioend_t *next;
441 struct buffer_head *bh;
442 struct bio *bio;
443 sector_t lastblock = 0;
444
d88992f6
DC
445 /* Pass 1 - start writeback */
446 do {
447 next = ioend->io_list;
448 for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
449 xfs_start_buffer_writeback(bh);
450 }
451 } while ((ioend = next) != NULL);
452
453 /* Pass 2 - submit I/O */
454 ioend = head;
f6d6d4fc
CH
455 do {
456 next = ioend->io_list;
457 bio = NULL;
458
459 for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
f6d6d4fc
CH
460
461 if (!bio) {
462 retry:
463 bio = xfs_alloc_ioend_bio(bh);
464 } else if (bh->b_blocknr != lastblock + 1) {
465 xfs_submit_ioend_bio(ioend, bio);
466 goto retry;
467 }
468
469 if (bio_add_buffer(bio, bh) != bh->b_size) {
470 xfs_submit_ioend_bio(ioend, bio);
471 goto retry;
472 }
473
474 lastblock = bh->b_blocknr;
475 }
476 if (bio)
477 xfs_submit_ioend_bio(ioend, bio);
e927af90 478 xfs_finish_ioend(ioend, 0);
f6d6d4fc
CH
479 } while ((ioend = next) != NULL);
480}
481
482/*
483 * Cancel submission of all buffer_heads so far in this endio.
484 * Toss the endio too. Only ever called for the initial page
485 * in a writepage request, so only ever one page.
486 */
487STATIC void
488xfs_cancel_ioend(
489 xfs_ioend_t *ioend)
490{
491 xfs_ioend_t *next;
492 struct buffer_head *bh, *next_bh;
493
494 do {
495 next = ioend->io_list;
496 bh = ioend->io_buffer_head;
497 do {
498 next_bh = bh->b_private;
499 clear_buffer_async_write(bh);
500 unlock_buffer(bh);
501 } while ((bh = next_bh) != NULL);
502
503 vn_iowake(ioend->io_vnode);
504 mempool_free(ioend, xfs_ioend_pool);
505 } while ((ioend = next) != NULL);
506}
507
508/*
509 * Test to see if we've been building up a completion structure for
510 * earlier buffers -- if so, we try to append to this ioend if we
511 * can, otherwise we finish off any current ioend and start another.
512 * Return true if we've finished the given ioend.
513 */
514STATIC void
515xfs_add_to_ioend(
516 struct inode *inode,
517 struct buffer_head *bh,
7336cea8 518 xfs_off_t offset,
f6d6d4fc
CH
519 unsigned int type,
520 xfs_ioend_t **result,
521 int need_ioend)
522{
523 xfs_ioend_t *ioend = *result;
524
525 if (!ioend || need_ioend || type != ioend->io_type) {
526 xfs_ioend_t *previous = *result;
f6d6d4fc 527
f6d6d4fc
CH
528 ioend = xfs_alloc_ioend(inode, type);
529 ioend->io_offset = offset;
530 ioend->io_buffer_head = bh;
531 ioend->io_buffer_tail = bh;
532 if (previous)
533 previous->io_list = ioend;
534 *result = ioend;
535 } else {
536 ioend->io_buffer_tail->b_private = bh;
537 ioend->io_buffer_tail = bh;
538 }
539
540 bh->b_private = NULL;
541 ioend->io_size += bh->b_size;
542}
543
87cbc49c
NS
544STATIC void
545xfs_map_buffer(
546 struct buffer_head *bh,
547 xfs_iomap_t *mp,
548 xfs_off_t offset,
549 uint block_bits)
550{
551 sector_t bn;
552
553 ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);
554
555 bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
556 ((offset - mp->iomap_offset) >> block_bits);
557
558 ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));
559
560 bh->b_blocknr = bn;
561 set_buffer_mapped(bh);
562}
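
The b_blocknr computation above converts the mapping's start (kept in 512-byte basic blocks, BBSHIFT == 9) into filesystem blocks and then adds the buffer's block offset within the mapping. A tiny stand-alone sketch with invented numbers:

/* Illustrative model of the xfs_map_buffer() block-number math.
 * BBSHIFT is 9 in XFS (512-byte basic blocks); everything else here
 * is made up for the example. */
#include <stdio.h>
#include <stdint.h>

#define BBSHIFT 9

int main(void)
{
	unsigned block_bits = 12;            /* 4096-byte filesystem blocks */
	uint64_t iomap_bn = 8000;            /* mapping start, in 512-byte units */
	uint64_t iomap_offset = 0;           /* file offset the mapping starts at */
	uint64_t offset = 16384;             /* file offset of this buffer_head */

	/* Convert the 512-byte start to fs blocks, then add how many fs
	 * blocks into the mapping this buffer sits. */
	uint64_t bn = (iomap_bn >> (block_bits - BBSHIFT)) +
		      ((offset - iomap_offset) >> block_bits);

	printf("b_blocknr = %llu\n", (unsigned long long)bn); /* 1000 + 4 = 1004 */
	return 0;
}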
563
1da177e4
LT
564STATIC void
565xfs_map_at_offset(
1da177e4 566 struct buffer_head *bh,
1defeac9 567 loff_t offset,
1da177e4 568 int block_bits,
1defeac9 569 xfs_iomap_t *iomapp)
1da177e4 570{
1da177e4
LT
571 ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
572 ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
1da177e4
LT
573
574 lock_buffer(bh);
87cbc49c 575 xfs_map_buffer(bh, iomapp, offset, block_bits);
ce8e922c 576 bh->b_bdev = iomapp->iomap_target->bt_bdev;
1da177e4
LT
577 set_buffer_mapped(bh);
578 clear_buffer_delay(bh);
f6d6d4fc 579 clear_buffer_unwritten(bh);
1da177e4
LT
580}
581
582/*
6c4fe19f 583 * Look for a page at index that is suitable for clustering.
1da177e4
LT
584 */
585STATIC unsigned int
6c4fe19f 586xfs_probe_page(
10ce4444 587 struct page *page,
6c4fe19f
CH
588 unsigned int pg_offset,
589 int mapped)
1da177e4 590{
1da177e4
LT
591 int ret = 0;
592
1da177e4 593 if (PageWriteback(page))
10ce4444 594 return 0;
1da177e4
LT
595
596 if (page->mapping && PageDirty(page)) {
597 if (page_has_buffers(page)) {
598 struct buffer_head *bh, *head;
599
600 bh = head = page_buffers(page);
601 do {
6c4fe19f
CH
602 if (!buffer_uptodate(bh))
603 break;
604 if (mapped != buffer_mapped(bh))
1da177e4
LT
605 break;
606 ret += bh->b_size;
607 if (ret >= pg_offset)
608 break;
609 } while ((bh = bh->b_this_page) != head);
610 } else
6c4fe19f 611 ret = mapped ? 0 : PAGE_CACHE_SIZE;
1da177e4
LT
612 }
613
1da177e4
LT
614 return ret;
615}
616
f6d6d4fc 617STATIC size_t
6c4fe19f 618xfs_probe_cluster(
1da177e4
LT
619 struct inode *inode,
620 struct page *startpage,
621 struct buffer_head *bh,
6c4fe19f
CH
622 struct buffer_head *head,
623 int mapped)
1da177e4 624{
10ce4444 625 struct pagevec pvec;
1da177e4 626 pgoff_t tindex, tlast, tloff;
10ce4444
CH
627 size_t total = 0;
628 int done = 0, i;
1da177e4
LT
629
630 /* First sum forwards in this page */
631 do {
2353e8e9 632 if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
10ce4444 633 return total;
1da177e4
LT
634 total += bh->b_size;
635 } while ((bh = bh->b_this_page) != head);
636
10ce4444
CH
637 /* if we reached the end of the page, sum forwards in following pages */
638 tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
639 tindex = startpage->index + 1;
640
641 /* Prune this back to avoid pathological behavior */
642 tloff = min(tlast, startpage->index + 64);
643
644 pagevec_init(&pvec, 0);
645 while (!done && tindex <= tloff) {
646 unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
647
648 if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
649 break;
650
651 for (i = 0; i < pagevec_count(&pvec); i++) {
652 struct page *page = pvec.pages[i];
653 size_t pg_offset, len = 0;
654
655 if (tindex == tlast) {
656 pg_offset =
657 i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
1defeac9
CH
658 if (!pg_offset) {
659 done = 1;
10ce4444 660 break;
1defeac9 661 }
10ce4444
CH
662 } else
663 pg_offset = PAGE_CACHE_SIZE;
664
665 if (page->index == tindex && !TestSetPageLocked(page)) {
6c4fe19f 666 len = xfs_probe_page(page, pg_offset, mapped);
10ce4444
CH
667 unlock_page(page);
668 }
669
670 if (!len) {
671 done = 1;
672 break;
673 }
674
1da177e4 675 total += len;
1defeac9 676 tindex++;
1da177e4 677 }
10ce4444
CH
678
679 pagevec_release(&pvec);
680 cond_resched();
1da177e4 681 }
10ce4444 682
1da177e4
LT
683 return total;
684}
685
686/*
10ce4444
CH
687 * Test if a given page is suitable for writing as part of an unwritten
688 * or delayed allocate extent.
1da177e4 689 */
10ce4444
CH
690STATIC int
691xfs_is_delayed_page(
692 struct page *page,
f6d6d4fc 693 unsigned int type)
1da177e4 694{
1da177e4 695 if (PageWriteback(page))
10ce4444 696 return 0;
1da177e4
LT
697
698 if (page->mapping && page_has_buffers(page)) {
699 struct buffer_head *bh, *head;
700 int acceptable = 0;
701
702 bh = head = page_buffers(page);
703 do {
f6d6d4fc
CH
704 if (buffer_unwritten(bh))
705 acceptable = (type == IOMAP_UNWRITTEN);
706 else if (buffer_delay(bh))
707 acceptable = (type == IOMAP_DELAY);
2ddee844 708 else if (buffer_dirty(bh) && buffer_mapped(bh))
df3c7244 709 acceptable = (type == IOMAP_NEW);
f6d6d4fc 710 else
1da177e4 711 break;
1da177e4
LT
712 } while ((bh = bh->b_this_page) != head);
713
714 if (acceptable)
10ce4444 715 return 1;
1da177e4
LT
716 }
717
10ce4444 718 return 0;
1da177e4
LT
719}
720
1da177e4
LT
721/*
722 * Allocate & map buffers for page given the extent map. Write it out.
723 * except for the original page of a writepage, this is called on
724 * delalloc/unwritten pages only, for the original page it is possible
725 * that the page has no mapping at all.
726 */
f6d6d4fc 727STATIC int
1da177e4
LT
728xfs_convert_page(
729 struct inode *inode,
730 struct page *page,
10ce4444 731 loff_t tindex,
1defeac9 732 xfs_iomap_t *mp,
f6d6d4fc 733 xfs_ioend_t **ioendp,
1da177e4 734 struct writeback_control *wbc,
1da177e4
LT
735 int startio,
736 int all_bh)
737{
f6d6d4fc 738 struct buffer_head *bh, *head;
9260dc6b
CH
739 xfs_off_t end_offset;
740 unsigned long p_offset;
f6d6d4fc 741 unsigned int type;
1da177e4 742 int bbits = inode->i_blkbits;
24e17b5f 743 int len, page_dirty;
f6d6d4fc 744 int count = 0, done = 0, uptodate = 1;
9260dc6b 745 xfs_off_t offset = page_offset(page);
1da177e4 746
10ce4444
CH
747 if (page->index != tindex)
748 goto fail;
749 if (TestSetPageLocked(page))
750 goto fail;
751 if (PageWriteback(page))
752 goto fail_unlock_page;
753 if (page->mapping != inode->i_mapping)
754 goto fail_unlock_page;
755 if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
756 goto fail_unlock_page;
757
24e17b5f
NS
758 /*
759 * page_dirty is initially a count of buffers on the page before
c41564b5 760 * EOF and is decremented as we move each into a cleanable state.
9260dc6b
CH
761 *
762 * Derivation:
763 *
764 * End offset is the highest offset that this page should represent.
765 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
766 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
767 * hence give us the correct page_dirty count. On any other page,
768 * it will be zero and in that case we need page_dirty to be the
769 * count of buffers on the page.
24e17b5f 770 */
9260dc6b
CH
771 end_offset = min_t(unsigned long long,
772 (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
773 i_size_read(inode));
774
24e17b5f 775 len = 1 << inode->i_blkbits;
9260dc6b
CH
776 p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
777 PAGE_CACHE_SIZE);
778 p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
779 page_dirty = p_offset / len;
24e17b5f 780
1da177e4
LT
781 bh = head = page_buffers(page);
782 do {
9260dc6b 783 if (offset >= end_offset)
1da177e4 784 break;
f6d6d4fc
CH
785 if (!buffer_uptodate(bh))
786 uptodate = 0;
787 if (!(PageUptodate(page) || buffer_uptodate(bh))) {
788 done = 1;
1da177e4 789 continue;
f6d6d4fc
CH
790 }
791
9260dc6b
CH
792 if (buffer_unwritten(bh) || buffer_delay(bh)) {
793 if (buffer_unwritten(bh))
794 type = IOMAP_UNWRITTEN;
795 else
796 type = IOMAP_DELAY;
797
798 if (!xfs_iomap_valid(mp, offset)) {
f6d6d4fc 799 done = 1;
9260dc6b
CH
800 continue;
801 }
802
803 ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
804 ASSERT(!(mp->iomap_flags & IOMAP_DELAY));
805
806 xfs_map_at_offset(bh, offset, bbits, mp);
807 if (startio) {
7336cea8 808 xfs_add_to_ioend(inode, bh, offset,
9260dc6b
CH
809 type, ioendp, done);
810 } else {
811 set_buffer_dirty(bh);
812 unlock_buffer(bh);
813 mark_buffer_dirty(bh);
814 }
815 page_dirty--;
816 count++;
817 } else {
df3c7244 818 type = IOMAP_NEW;
9260dc6b 819 if (buffer_mapped(bh) && all_bh && startio) {
1da177e4 820 lock_buffer(bh);
7336cea8 821 xfs_add_to_ioend(inode, bh, offset,
f6d6d4fc
CH
822 type, ioendp, done);
823 count++;
24e17b5f 824 page_dirty--;
9260dc6b
CH
825 } else {
826 done = 1;
1da177e4 827 }
1da177e4 828 }
7336cea8 829 } while (offset += len, (bh = bh->b_this_page) != head);
1da177e4 830
f6d6d4fc
CH
831 if (uptodate && bh == head)
832 SetPageUptodate(page);
833
834 if (startio) {
f5e596bb
CH
835 if (count) {
836 struct backing_dev_info *bdi;
837
838 bdi = inode->i_mapping->backing_dev_info;
9fddaca2 839 wbc->nr_to_write--;
f5e596bb
CH
840 if (bdi_write_congested(bdi)) {
841 wbc->encountered_congestion = 1;
842 done = 1;
9fddaca2 843 } else if (wbc->nr_to_write <= 0) {
f5e596bb
CH
844 done = 1;
845 }
846 }
f6d6d4fc 847 xfs_start_page_writeback(page, wbc, !page_dirty, count);
1da177e4 848 }
f6d6d4fc
CH
849
850 return done;
10ce4444
CH
851 fail_unlock_page:
852 unlock_page(page);
853 fail:
854 return 1;
1da177e4
LT
855}
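
The page_dirty derivation described in the comment above is easiest to see with numbers. Below is a stand-alone sketch (4 KiB pages, 512-byte blocks and a 10000-byte file are all invented for illustration): on the EOF page only the buffers before EOF are counted, on any earlier page every buffer on the page is.

/* Model of the page_dirty calculation in xfs_convert_page() /
 * xfs_page_state_convert(); sizes are illustrative only. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SHIFT 12
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

static unsigned long roundup_to(unsigned long x, unsigned long align)
{
	return ((x + align - 1) / align) * align;
}

static int page_dirty_count(uint64_t i_size, unsigned long index, unsigned blkbits)
{
	unsigned long len = 1UL << blkbits;
	uint64_t end_offset = (uint64_t)(index + 1) << PAGE_CACHE_SHIFT;

	if (end_offset > i_size)
		end_offset = i_size;

	unsigned long p_offset = end_offset & (PAGE_CACHE_SIZE - 1);
	/* Non-zero only on the EOF page; elsewhere count every buffer. */
	p_offset = p_offset ? roundup_to(p_offset, len) : PAGE_CACHE_SIZE;
	return p_offset / len;
}

int main(void)
{
	uint64_t i_size = 10000;	/* file size in bytes */

	printf("page 1: %d buffers before EOF\n", page_dirty_count(i_size, 1, 9)); /* 8 */
	printf("page 2: %d buffers before EOF\n", page_dirty_count(i_size, 2, 9)); /* 4 */
	return 0;
}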
856
857/*
858 * Convert & write out a cluster of pages in the same extent as defined
859 * by mp and following the start page.
860 */
861STATIC void
862xfs_cluster_write(
863 struct inode *inode,
864 pgoff_t tindex,
865 xfs_iomap_t *iomapp,
f6d6d4fc 866 xfs_ioend_t **ioendp,
1da177e4
LT
867 struct writeback_control *wbc,
868 int startio,
869 int all_bh,
870 pgoff_t tlast)
871{
10ce4444
CH
872 struct pagevec pvec;
873 int done = 0, i;
1da177e4 874
10ce4444
CH
875 pagevec_init(&pvec, 0);
876 while (!done && tindex <= tlast) {
877 unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
878
879 if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
1da177e4 880 break;
10ce4444
CH
881
882 for (i = 0; i < pagevec_count(&pvec); i++) {
883 done = xfs_convert_page(inode, pvec.pages[i], tindex++,
884 iomapp, ioendp, wbc, startio, all_bh);
885 if (done)
886 break;
887 }
888
889 pagevec_release(&pvec);
890 cond_resched();
1da177e4
LT
891 }
892}
893
894/*
895 * Calling this without startio set means we are being asked to make a dirty
896 * page ready for freeing its buffers. When called with startio set then
897 * we are coming from writepage.
898 *
899 * When called with startio set it is important that we write the WHOLE
900 * page if possible.
901 * The bh->b_state flags cannot tell us which blocks, if any, are
902 * dirty due to mmap writes, and therefore bh uptodate is
c41564b5 903 * only valid if the page itself isn't completely uptodate. Some layers
1da177e4
LT
904 * may clear the page dirty flag prior to calling writepage, under the
905 * assumption the entire page will be written out; by not writing out the
906 * whole page the page can be reused before all valid dirty data is
907 * written out. Note: in the case of a page that has been dirtied by
908 * mmap write but only partially set up by block_prepare_write, the
909 * bh->b_state values will not agree and only the ones set up by BPW/BCW
910 * will have valid state; thus the whole page must be written out.
911 */
912
913STATIC int
914xfs_page_state_convert(
915 struct inode *inode,
916 struct page *page,
917 struct writeback_control *wbc,
918 int startio,
919 int unmapped) /* also implies page uptodate */
920{
f6d6d4fc 921 struct buffer_head *bh, *head;
1defeac9 922 xfs_iomap_t iomap;
f6d6d4fc 923 xfs_ioend_t *ioend = NULL, *iohead = NULL;
1da177e4
LT
924 loff_t offset;
925 unsigned long p_offset = 0;
f6d6d4fc 926 unsigned int type;
1da177e4
LT
927 __uint64_t end_offset;
928 pgoff_t end_index, last_index, tlast;
d5cb48aa
CH
929 ssize_t size, len;
930 int flags, err, iomap_valid = 0, uptodate = 1;
8272145c
NS
931 int page_dirty, count = 0;
932 int trylock = 0;
6c4fe19f 933 int all_bh = unmapped;
1da177e4 934
8272145c
NS
935 if (startio) {
936 if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
937 trylock |= BMAPI_TRYLOCK;
938 }
3ba0815a 939
1da177e4
LT
940 /* Is this page beyond the end of the file? */
941 offset = i_size_read(inode);
942 end_index = offset >> PAGE_CACHE_SHIFT;
943 last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
944 if (page->index >= end_index) {
945 if ((page->index >= end_index + 1) ||
946 !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
19d5bcf3
NS
947 if (startio)
948 unlock_page(page);
949 return 0;
1da177e4
LT
950 }
951 }
952
1da177e4 953 /*
24e17b5f 954 * page_dirty is initially a count of buffers on the page before
c41564b5 955 * EOF and is decremented as we move each into a cleanable state.
f6d6d4fc
CH
956 *
957 * Derivation:
958 *
959 * End offset is the highest offset that this page should represent.
960 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
961 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
962 * hence give us the correct page_dirty count. On any other page,
963 * it will be zero and in that case we need page_dirty to be the
964 * count of buffers on the page.
965 */
966 end_offset = min_t(unsigned long long,
967 (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
24e17b5f 968 len = 1 << inode->i_blkbits;
f6d6d4fc
CH
969 p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
970 PAGE_CACHE_SIZE);
971 p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
24e17b5f
NS
972 page_dirty = p_offset / len;
973
24e17b5f 974 bh = head = page_buffers(page);
f6d6d4fc 975 offset = page_offset(page);
df3c7244
DC
976 flags = BMAPI_READ;
977 type = IOMAP_NEW;
f6d6d4fc 978
f6d6d4fc 979 /* TODO: cleanup count and page_dirty */
1da177e4
LT
980
981 do {
982 if (offset >= end_offset)
983 break;
984 if (!buffer_uptodate(bh))
985 uptodate = 0;
f6d6d4fc 986 if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
1defeac9
CH
987 /*
988 * the iomap is actually still valid, but the ioend
989 * isn't. shouldn't happen too often.
990 */
991 iomap_valid = 0;
1da177e4 992 continue;
f6d6d4fc 993 }
1da177e4 994
1defeac9
CH
995 if (iomap_valid)
996 iomap_valid = xfs_iomap_valid(&iomap, offset);
1da177e4
LT
997
998 /*
999 * First case, map an unwritten extent and prepare for
1000 * extent state conversion transaction on completion.
f6d6d4fc 1001 *
1da177e4
LT
1002 * Second case, allocate space for a delalloc buffer.
1003 * We can return EAGAIN here in the release page case.
d5cb48aa
CH
1004 *
1005 * Third case, an unmapped buffer was found, and we are
1006 * in a path where we need to write the whole page out.
df3c7244 1007 */
d5cb48aa
CH
1008 if (buffer_unwritten(bh) || buffer_delay(bh) ||
1009 ((buffer_uptodate(bh) || PageUptodate(page)) &&
1010 !buffer_mapped(bh) && (unmapped || startio))) {
df3c7244 1011 /*
6c4fe19f
CH
1012 * Make sure we don't use a read-only iomap
1013 */
df3c7244 1014 if (flags == BMAPI_READ)
6c4fe19f
CH
1015 iomap_valid = 0;
1016
f6d6d4fc
CH
1017 if (buffer_unwritten(bh)) {
1018 type = IOMAP_UNWRITTEN;
8272145c 1019 flags = BMAPI_WRITE | BMAPI_IGNSTATE;
d5cb48aa 1020 } else if (buffer_delay(bh)) {
f6d6d4fc 1021 type = IOMAP_DELAY;
8272145c 1022 flags = BMAPI_ALLOCATE | trylock;
d5cb48aa 1023 } else {
6c4fe19f 1024 type = IOMAP_NEW;
8272145c 1025 flags = BMAPI_WRITE | BMAPI_MMAP;
f6d6d4fc
CH
1026 }
1027
1defeac9 1028 if (!iomap_valid) {
6c4fe19f
CH
1029 if (type == IOMAP_NEW) {
1030 size = xfs_probe_cluster(inode,
1031 page, bh, head, 0);
d5cb48aa
CH
1032 } else {
1033 size = len;
1034 }
1035
1036 err = xfs_map_blocks(inode, offset, size,
1037 &iomap, flags);
f6d6d4fc 1038 if (err)
1da177e4 1039 goto error;
1defeac9 1040 iomap_valid = xfs_iomap_valid(&iomap, offset);
1da177e4 1041 }
1defeac9
CH
1042 if (iomap_valid) {
1043 xfs_map_at_offset(bh, offset,
1044 inode->i_blkbits, &iomap);
1da177e4 1045 if (startio) {
7336cea8 1046 xfs_add_to_ioend(inode, bh, offset,
1defeac9
CH
1047 type, &ioend,
1048 !iomap_valid);
1da177e4
LT
1049 } else {
1050 set_buffer_dirty(bh);
1051 unlock_buffer(bh);
1052 mark_buffer_dirty(bh);
1053 }
1054 page_dirty--;
f6d6d4fc 1055 count++;
1da177e4 1056 }
d5cb48aa 1057 } else if (buffer_uptodate(bh) && startio) {
6c4fe19f
CH
1058 /*
1059 * we got here because the buffer is already mapped.
1060 * That means it must already have extents allocated
1061 * underneath it. Map the extent by reading it.
1062 */
df3c7244 1063 if (!iomap_valid || flags != BMAPI_READ) {
6c4fe19f
CH
1064 flags = BMAPI_READ;
1065 size = xfs_probe_cluster(inode, page, bh,
1066 head, 1);
1067 err = xfs_map_blocks(inode, offset, size,
1068 &iomap, flags);
1069 if (err)
1070 goto error;
1071 iomap_valid = xfs_iomap_valid(&iomap, offset);
1072 }
d5cb48aa 1073
df3c7244
DC
1074 /*
1075 * We set the type to IOMAP_NEW in case we are doing a
1076 * small write at EOF that is extending the file but
1077 * without needing an allocation. We need to update the
1078 * file size on I/O completion in this case so it is
1079 * the same case as having just allocated a new extent
1080 * that we are writing into for the first time.
1081 */
1082 type = IOMAP_NEW;
d5cb48aa
CH
1083 if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
1084 ASSERT(buffer_mapped(bh));
6c4fe19f
CH
1085 if (iomap_valid)
1086 all_bh = 1;
7336cea8 1087 xfs_add_to_ioend(inode, bh, offset, type,
d5cb48aa
CH
1088 &ioend, !iomap_valid);
1089 page_dirty--;
1090 count++;
f6d6d4fc 1091 } else {
1defeac9 1092 iomap_valid = 0;
1da177e4 1093 }
d5cb48aa
CH
1094 } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
1095 (unmapped || startio)) {
1096 iomap_valid = 0;
1da177e4 1097 }
f6d6d4fc
CH
1098
1099 if (!iohead)
1100 iohead = ioend;
1101
1102 } while (offset += len, ((bh = bh->b_this_page) != head));
1da177e4
LT
1103
1104 if (uptodate && bh == head)
1105 SetPageUptodate(page);
1106
f6d6d4fc
CH
1107 if (startio)
1108 xfs_start_page_writeback(page, wbc, 1, count);
1da177e4 1109
1defeac9
CH
1110 if (ioend && iomap_valid) {
1111 offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
1da177e4 1112 PAGE_CACHE_SHIFT;
775bf6c9 1113 tlast = min_t(pgoff_t, offset, last_index);
1defeac9 1114 xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
6c4fe19f 1115 wbc, startio, all_bh, tlast);
1da177e4
LT
1116 }
1117
f6d6d4fc
CH
1118 if (iohead)
1119 xfs_submit_ioend(iohead);
1120
1da177e4
LT
1121 return page_dirty;
1122
1123error:
f6d6d4fc
CH
1124 if (iohead)
1125 xfs_cancel_ioend(iohead);
1da177e4
LT
1126
1127 /*
1128 * If it's delalloc and we have nowhere to put it,
1129 * throw it away, unless the lower layers told
1130 * us to try again.
1131 */
1132 if (err != -EAGAIN) {
f6d6d4fc 1133 if (!unmapped)
1da177e4 1134 block_invalidatepage(page, 0);
1da177e4
LT
1135 ClearPageUptodate(page);
1136 }
1137 return err;
1138}
1139
f51623b2
NS
1140/*
1141 * writepage: Called from one of two places:
1142 *
1143 * 1. we are flushing a delalloc buffer head.
1144 *
1145 * 2. we are writing out a dirty page. Typically the page dirty
1146 * state is cleared before we get here. In this case it is
1147 * conceivable we have no buffer heads.
1148 *
1149 * For delalloc space on the page we need to allocate space and
1150 * flush it. For unmapped buffer heads on the page we should
1151 * allocate space if the page is uptodate. For any other dirty
1152 * buffer heads on the page we should flush them.
1153 *
1154 * If we detect that a transaction would be required to flush
1155 * the page, we have to check the process flags first, if we
1156 * are already in a transaction or disk I/O during allocations
1157 * is off, we need to fail the writepage and redirty the page.
1158 */
1159
1160STATIC int
e4c573bb 1161xfs_vm_writepage(
f51623b2
NS
1162 struct page *page,
1163 struct writeback_control *wbc)
1164{
1165 int error;
1166 int need_trans;
1167 int delalloc, unmapped, unwritten;
1168 struct inode *inode = page->mapping->host;
1169
1170 xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
1171
1172 /*
1173 * We need a transaction if:
1174 * 1. There are delalloc buffers on the page
1175 * 2. The page is uptodate and we have unmapped buffers
1176 * 3. The page is uptodate and we have no buffers
1177 * 4. There are unwritten buffers on the page
1178 */
1179
1180 if (!page_has_buffers(page)) {
1181 unmapped = 1;
1182 need_trans = 1;
1183 } else {
1184 xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
1185 if (!PageUptodate(page))
1186 unmapped = 0;
1187 need_trans = delalloc + unmapped + unwritten;
1188 }
1189
1190 /*
1191 * If we need a transaction and the process flags say
1192 * we are already in a transaction, or no IO is allowed
1193 * then mark the page dirty again and leave the page
1194 * as is.
1195 */
59c1b082 1196 if (current_test_flags(PF_FSTRANS) && need_trans)
f51623b2
NS
1197 goto out_fail;
1198
1199 /*
1200 * Delay hooking up buffer heads until we have
1201 * made our go/no-go decision.
1202 */
1203 if (!page_has_buffers(page))
1204 create_empty_buffers(page, 1 << inode->i_blkbits, 0);
1205
1206 /*
1207 * Convert delayed allocate, unwritten or unmapped space
1208 * to real space and flush out to disk.
1209 */
1210 error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
1211 if (error == -EAGAIN)
1212 goto out_fail;
1213 if (unlikely(error < 0))
1214 goto out_unlock;
1215
1216 return 0;
1217
1218out_fail:
1219 redirty_page_for_writepage(wbc, page);
1220 unlock_page(page);
1221 return 0;
1222out_unlock:
1223 unlock_page(page);
1224 return error;
1225}
1226
7d4fb40a
NS
1227STATIC int
1228xfs_vm_writepages(
1229 struct address_space *mapping,
1230 struct writeback_control *wbc)
1231{
67fcaa73 1232 struct bhv_vnode *vp = vn_from_inode(mapping->host);
7d4fb40a
NS
1233
1234 if (VN_TRUNC(vp))
1235 VUNTRUNCATE(vp);
1236 return generic_writepages(mapping, wbc);
1237}
1238
f51623b2
NS
1239/*
1240 * Called to move a page into cleanable state - and from there
1241 * to be released. Possibly the page is already clean. We always
1242 * have buffer heads in this call.
1243 *
1244 * Returns 0 if the page is ok to release, 1 otherwise.
1245 *
1246 * Possible scenarios are:
1247 *
1248 * 1. We are being called to release a page which has been written
1249 * to via regular I/O. Buffer heads will be dirty and possibly
1250 * delalloc. If there are no delalloc buffer heads in this case then
1251 * we can just return zero.
1252 *
1253 * 2. We are called to release a page which has been written via
1254 * mmap, all we need to do is ensure there is no delalloc
1255 * state in the buffer heads, if not we can let the caller
1256 * free them and we should come back later via writepage.
1257 */
1258STATIC int
238f4c54 1259xfs_vm_releasepage(
f51623b2
NS
1260 struct page *page,
1261 gfp_t gfp_mask)
1262{
1263 struct inode *inode = page->mapping->host;
1264 int dirty, delalloc, unmapped, unwritten;
1265 struct writeback_control wbc = {
1266 .sync_mode = WB_SYNC_ALL,
1267 .nr_to_write = 1,
1268 };
1269
ed9d88f7 1270 xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);
f51623b2 1271
238f4c54
NS
1272 if (!page_has_buffers(page))
1273 return 0;
1274
f51623b2
NS
1275 xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
1276 if (!delalloc && !unwritten)
1277 goto free_buffers;
1278
1279 if (!(gfp_mask & __GFP_FS))
1280 return 0;
1281
1282 /* If we are already inside a transaction or the thread cannot
1283 * do I/O, we cannot release this page.
1284 */
59c1b082 1285 if (current_test_flags(PF_FSTRANS))
f51623b2
NS
1286 return 0;
1287
1288 /*
1289 * Convert delalloc space to real space, do not flush the
1290 * data out to disk, that will be done by the caller.
1291 * Never need to allocate space here - we will always
1292 * come back to writepage in that case.
1293 */
1294 dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
1295 if (dirty == 0 && !unwritten)
1296 goto free_buffers;
1297 return 0;
1298
1299free_buffers:
1300 return try_to_free_buffers(page);
1301}
1302
1da177e4 1303STATIC int
c2536668 1304__xfs_get_blocks(
1da177e4
LT
1305 struct inode *inode,
1306 sector_t iblock,
1da177e4
LT
1307 struct buffer_head *bh_result,
1308 int create,
1309 int direct,
1310 bmapi_flags_t flags)
1311{
67fcaa73 1312 bhv_vnode_t *vp = vn_from_inode(inode);
1da177e4 1313 xfs_iomap_t iomap;
fdc7ed75
NS
1314 xfs_off_t offset;
1315 ssize_t size;
c2536668 1316 int niomap = 1;
1da177e4 1317 int error;
1da177e4 1318
fdc7ed75 1319 offset = (xfs_off_t)iblock << inode->i_blkbits;
c2536668
NS
1320 ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1321 size = bh_result->b_size;
67fcaa73
NS
1322 error = bhv_vop_bmap(vp, offset, size,
1323 create ? flags : BMAPI_READ, &iomap, &niomap);
1da177e4
LT
1324 if (error)
1325 return -error;
c2536668 1326 if (niomap == 0)
1da177e4
LT
1327 return 0;
1328
1329 if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
87cbc49c
NS
1330 /*
1331 * For unwritten extents do not report a disk address on
1da177e4
LT
1332 * the read case (treat as if we're reading into a hole).
1333 */
1334 if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
87cbc49c
NS
1335 xfs_map_buffer(bh_result, &iomap, offset,
1336 inode->i_blkbits);
1da177e4
LT
1337 }
1338 if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
1339 if (direct)
1340 bh_result->b_private = inode;
1341 set_buffer_unwritten(bh_result);
1da177e4
LT
1342 }
1343 }
1344
c2536668
NS
1345 /*
1346 * If this is a realtime file, data may be on a different device
1347 * to that pointed to from the buffer_head b_bdev currently.
1348 */
ce8e922c 1349 bh_result->b_bdev = iomap.iomap_target->bt_bdev;
1da177e4 1350
c2536668 1351 /*
549054af
DC
1352 * If we previously allocated a block out beyond eof and we are now
1353 * coming back to use it then we will need to flag it as new even if it
1354 * has a disk address.
1355 *
1356 * With sub-block writes into unwritten extents we also need to mark
1357 * the buffer as new so that the unwritten parts of the buffer gets
1358 * correctly zeroed.
1da177e4
LT
1359 */
1360 if (create &&
1361 ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
549054af
DC
1362 (offset >= i_size_read(inode)) ||
1363 (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
1da177e4 1364 set_buffer_new(bh_result);
1da177e4
LT
1365
1366 if (iomap.iomap_flags & IOMAP_DELAY) {
1367 BUG_ON(direct);
1368 if (create) {
1369 set_buffer_uptodate(bh_result);
1370 set_buffer_mapped(bh_result);
1371 set_buffer_delay(bh_result);
1372 }
1373 }
1374
c2536668 1375 if (direct || size > (1 << inode->i_blkbits)) {
fdc7ed75
NS
1376 ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
1377 offset = min_t(xfs_off_t,
c2536668
NS
1378 iomap.iomap_bsize - iomap.iomap_delta, size);
1379 bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
1da177e4
LT
1380 }
1381
1382 return 0;
1383}
1384
1385int
c2536668 1386xfs_get_blocks(
1da177e4
LT
1387 struct inode *inode,
1388 sector_t iblock,
1389 struct buffer_head *bh_result,
1390 int create)
1391{
c2536668 1392 return __xfs_get_blocks(inode, iblock,
fa30bd05 1393 bh_result, create, 0, BMAPI_WRITE);
1da177e4
LT
1394}
1395
1396STATIC int
e4c573bb 1397xfs_get_blocks_direct(
1da177e4
LT
1398 struct inode *inode,
1399 sector_t iblock,
1da177e4
LT
1400 struct buffer_head *bh_result,
1401 int create)
1402{
c2536668 1403 return __xfs_get_blocks(inode, iblock,
1d8fa7a2 1404 bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
1da177e4
LT
1405}
1406
f0973863 1407STATIC void
e4c573bb 1408xfs_end_io_direct(
f0973863
CH
1409 struct kiocb *iocb,
1410 loff_t offset,
1411 ssize_t size,
1412 void *private)
1413{
1414 xfs_ioend_t *ioend = iocb->private;
1415
1416 /*
1417 * Non-NULL private data means we need to issue a transaction to
1418 * convert a range from unwritten to written extents. This needs
c41564b5 1419 * to happen from process context but aio+dio I/O completion
f0973863 1420 * happens from irq context so we need to defer it to a workqueue.
c41564b5 1421 * This is not necessary for synchronous direct I/O, but we do
f0973863
CH
1422 * it anyway to keep the code uniform and simpler.
1423 *
e927af90
DC
1424 * Well, if only it were that simple. Because synchronous direct I/O
1425 * requires extent conversion to occur *before* we return to userspace,
1426 * we have to wait for extent conversion to complete. Look at the
1427 * iocb that has been passed to us to determine if this is AIO or
1428 * not. If it is synchronous, tell xfs_finish_ioend() to kick the
1429 * workqueue and wait for it to complete.
1430 *
f0973863
CH
1431 * The core direct I/O code might be changed to always call the
1432 * completion handler in the future, in which case all this can
1433 * go away.
1434 */
ba87ea69
LM
1435 ioend->io_offset = offset;
1436 ioend->io_size = size;
1437 if (ioend->io_type == IOMAP_READ) {
e927af90 1438 xfs_finish_ioend(ioend, 0);
ba87ea69 1439 } else if (private && size > 0) {
e927af90 1440 xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
f0973863 1441 } else {
ba87ea69
LM
1442 /*
1443 * A direct I/O write ioend starts its life in unwritten
1444 * state in case it maps an unwritten extent. This write
1445 * didn't map an unwritten extent so switch its completion
1446 * handler.
1447 */
1448 INIT_WORK(&ioend->io_work, xfs_end_bio_written);
e927af90 1449 xfs_finish_ioend(ioend, 0);
f0973863
CH
1450 }
1451
1452 /*
c41564b5 1453 * blockdev_direct_IO can return an error even after the I/O
f0973863
CH
1454 * completion handler was called. Thus we need to protect
1455 * against double-freeing.
1456 */
1457 iocb->private = NULL;
1458}
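
The comment above is the heart of this commit: synchronous direct I/O must not return to userspace until the unwritten-to-written conversion is done, so the completion path kicks the work and then waits, while AIO just queues it. Below is a rough user-space model of that "queue, and flush only when synchronous" pattern, built with pthreads rather than the kernel workqueue API; the names and structure are invented for illustration and are not the kernel implementation.

/* User-space model of xfs_finish_ioend(ioend, wait): drop the last
 * reference, hand completion work to a worker, and optionally wait for
 * the worker to drain before returning (the synchronous direct I/O case). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int pending_work;

static void *worker(void *arg)
{
	usleep(10000);			/* pretend to convert unwritten extents */
	pthread_mutex_lock(&lock);
	pending_work--;
	pthread_cond_broadcast(&drained);
	pthread_mutex_unlock(&lock);
	return arg;
}

struct ioend { atomic_int io_remaining; };

static void finish_ioend(struct ioend *ioend, int wait)
{
	pthread_t t;

	if (atomic_fetch_sub(&ioend->io_remaining, 1) != 1)
		return;			/* not the final hold on this ioend */

	pthread_mutex_lock(&lock);
	pending_work++;
	pthread_mutex_unlock(&lock);
	pthread_create(&t, NULL, worker, NULL);	/* "queue_work" analogue */
	pthread_detach(t);

	if (wait) {			/* synchronous direct I/O: block here */
		pthread_mutex_lock(&lock);
		while (pending_work)
			pthread_cond_wait(&drained, &lock);
		pthread_mutex_unlock(&lock);
	}
}

int main(void)
{
	struct ioend io = { .io_remaining = 1 };

	finish_ioend(&io, 1);		/* is_sync_kiocb() true: wait for it */
	printf("synchronous completion: conversion finished before return\n");
	return 0;
}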
1459
1da177e4 1460STATIC ssize_t
e4c573bb 1461xfs_vm_direct_IO(
1da177e4
LT
1462 int rw,
1463 struct kiocb *iocb,
1464 const struct iovec *iov,
1465 loff_t offset,
1466 unsigned long nr_segs)
1467{
1468 struct file *file = iocb->ki_filp;
1469 struct inode *inode = file->f_mapping->host;
67fcaa73 1470 bhv_vnode_t *vp = vn_from_inode(inode);
1da177e4
LT
1471 xfs_iomap_t iomap;
1472 int maps = 1;
1473 int error;
f0973863 1474 ssize_t ret;
1da177e4 1475
67fcaa73 1476 error = bhv_vop_bmap(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps);
1da177e4
LT
1477 if (error)
1478 return -error;
1479
721259bc 1480 if (rw == WRITE) {
ba87ea69 1481 iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
721259bc
LM
1482 ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
1483 iomap.iomap_target->bt_bdev,
1484 iov, offset, nr_segs,
1485 xfs_get_blocks_direct,
1486 xfs_end_io_direct);
1487 } else {
ba87ea69 1488 iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
721259bc
LM
1489 ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
1490 iomap.iomap_target->bt_bdev,
1491 iov, offset, nr_segs,
1492 xfs_get_blocks_direct,
1493 xfs_end_io_direct);
1494 }
f0973863 1495
8459d86a 1496 if (unlikely(ret != -EIOCBQUEUED && iocb->private))
f0973863
CH
1497 xfs_destroy_ioend(iocb->private);
1498 return ret;
1da177e4
LT
1499}
1500
f51623b2 1501STATIC int
e4c573bb 1502xfs_vm_prepare_write(
f51623b2
NS
1503 struct file *file,
1504 struct page *page,
1505 unsigned int from,
1506 unsigned int to)
1507{
c2536668 1508 return block_prepare_write(page, from, to, xfs_get_blocks);
f51623b2 1509}
1da177e4
LT
1510
1511STATIC sector_t
e4c573bb 1512xfs_vm_bmap(
1da177e4
LT
1513 struct address_space *mapping,
1514 sector_t block)
1515{
1516 struct inode *inode = (struct inode *)mapping->host;
67fcaa73 1517 bhv_vnode_t *vp = vn_from_inode(inode);
1da177e4 1518
e4c573bb 1519 vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
67fcaa73
NS
1520 bhv_vop_rwlock(vp, VRWLOCK_READ);
1521 bhv_vop_flush_pages(vp, (xfs_off_t)0, -1, 0, FI_REMAPF);
1522 bhv_vop_rwunlock(vp, VRWLOCK_READ);
c2536668 1523 return generic_block_bmap(mapping, block, xfs_get_blocks);
1da177e4
LT
1524}
1525
1526STATIC int
e4c573bb 1527xfs_vm_readpage(
1da177e4
LT
1528 struct file *unused,
1529 struct page *page)
1530{
c2536668 1531 return mpage_readpage(page, xfs_get_blocks);
1da177e4
LT
1532}
1533
1534STATIC int
e4c573bb 1535xfs_vm_readpages(
1da177e4
LT
1536 struct file *unused,
1537 struct address_space *mapping,
1538 struct list_head *pages,
1539 unsigned nr_pages)
1540{
c2536668 1541 return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
1da177e4
LT
1542}
1543
2ff28e22 1544STATIC void
238f4c54 1545xfs_vm_invalidatepage(
bcec2b7f
NS
1546 struct page *page,
1547 unsigned long offset)
1548{
1549 xfs_page_trace(XFS_INVALIDPAGE_ENTER,
1550 page->mapping->host, page, offset);
2ff28e22 1551 block_invalidatepage(page, offset);
bcec2b7f
NS
1552}
1553
f5e54d6e 1554const struct address_space_operations xfs_address_space_operations = {
e4c573bb
NS
1555 .readpage = xfs_vm_readpage,
1556 .readpages = xfs_vm_readpages,
1557 .writepage = xfs_vm_writepage,
7d4fb40a 1558 .writepages = xfs_vm_writepages,
1da177e4 1559 .sync_page = block_sync_page,
238f4c54
NS
1560 .releasepage = xfs_vm_releasepage,
1561 .invalidatepage = xfs_vm_invalidatepage,
e4c573bb 1562 .prepare_write = xfs_vm_prepare_write,
1da177e4 1563 .commit_write = generic_commit_write,
e4c573bb
NS
1564 .bmap = xfs_vm_bmap,
1565 .direct_IO = xfs_vm_direct_IO,
e965f963 1566 .migratepage = buffer_migrate_page,
1da177e4 1567};