fs/xfs/linux-2.6/xfs_aops.c (deliverable/linux.git @ 6feecd27947048f5d074b9bf86d5c94add89142c)
1 /*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 #include "xfs.h"
19 #include "xfs_bit.h"
20 #include "xfs_log.h"
21 #include "xfs_inum.h"
22 #include "xfs_sb.h"
23 #include "xfs_ag.h"
24 #include "xfs_dir2.h"
25 #include "xfs_trans.h"
26 #include "xfs_dmapi.h"
27 #include "xfs_mount.h"
28 #include "xfs_bmap_btree.h"
29 #include "xfs_alloc_btree.h"
30 #include "xfs_ialloc_btree.h"
31 #include "xfs_dir2_sf.h"
32 #include "xfs_attr_sf.h"
33 #include "xfs_dinode.h"
34 #include "xfs_inode.h"
35 #include "xfs_alloc.h"
36 #include "xfs_btree.h"
37 #include "xfs_error.h"
38 #include "xfs_rw.h"
39 #include "xfs_iomap.h"
40 #include "xfs_vnodeops.h"
41 #include "xfs_trace.h"
42 #include "xfs_bmap.h"
43 #include <linux/gfp.h>
44 #include <linux/mpage.h>
45 #include <linux/pagevec.h>
46 #include <linux/writeback.h>
47
48 /*
49 * Types of I/O for bmap clustering and I/O completion tracking.
50 */
51 enum {
52 IO_READ, /* mapping for a read */
53 IO_DELAY, /* mapping covers delalloc region */
54 IO_UNWRITTEN, /* mapping covers allocated but uninitialized data */
55 IO_NEW /* just allocated */
56 };
57
58 /*
59 * Prime number of hash buckets since address is used as the key.
60 */
61 #define NVSYNC 37
62 #define to_ioend_wq(v) (&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
63 static wait_queue_head_t xfs_ioend_wq[NVSYNC];
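
/*
 * Example (hypothetical address): to_ioend_wq(ip) evaluates to
 * &xfs_ioend_wq[((unsigned long)ip) % 37], so an inode at, say,
 * 0xffff880012345680 always waits on and wakes the same bucket. A prime
 * bucket count spreads pointer values, which tend to be multiples of the
 * slab object size, more evenly across the table.
 */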
64
65 void __init
66 xfs_ioend_init(void)
67 {
68 int i;
69
70 for (i = 0; i < NVSYNC; i++)
71 init_waitqueue_head(&xfs_ioend_wq[i]);
72 }
73
74 void
75 xfs_ioend_wait(
76 xfs_inode_t *ip)
77 {
78 wait_queue_head_t *wq = to_ioend_wq(ip);
79
80 wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
81 }
82
83 STATIC void
84 xfs_ioend_wake(
85 xfs_inode_t *ip)
86 {
87 if (atomic_dec_and_test(&ip->i_iocount))
88 wake_up(to_ioend_wq(ip));
89 }
90
91 void
92 xfs_count_page_state(
93 struct page *page,
94 int *delalloc,
95 int *unmapped,
96 int *unwritten)
97 {
98 struct buffer_head *bh, *head;
99
100 *delalloc = *unmapped = *unwritten = 0;
101
102 bh = head = page_buffers(page);
103 do {
104 if (buffer_uptodate(bh) && !buffer_mapped(bh))
105 (*unmapped) = 1;
106 else if (buffer_unwritten(bh))
107 (*unwritten) = 1;
108 else if (buffer_delay(bh))
109 (*delalloc) = 1;
110 } while ((bh = bh->b_this_page) != head);
111 }
112
113 STATIC struct block_device *
114 xfs_find_bdev_for_inode(
115 struct inode *inode)
116 {
117 struct xfs_inode *ip = XFS_I(inode);
118 struct xfs_mount *mp = ip->i_mount;
119
120 if (XFS_IS_REALTIME_INODE(ip))
121 return mp->m_rtdev_targp->bt_bdev;
122 else
123 return mp->m_ddev_targp->bt_bdev;
124 }
125
126 /*
127 * We're now finished for good with this ioend structure.
128 * Update the page state via the associated buffer_heads,
129 * release holds on the inode and bio, and finally free
130 * up memory. Do not use the ioend after this.
131 */
132 STATIC void
133 xfs_destroy_ioend(
134 xfs_ioend_t *ioend)
135 {
136 struct buffer_head *bh, *next;
137 struct xfs_inode *ip = XFS_I(ioend->io_inode);
138
139 for (bh = ioend->io_buffer_head; bh; bh = next) {
140 next = bh->b_private;
141 bh->b_end_io(bh, !ioend->io_error);
142 }
143
144 /*
145 * Volume managers supporting multiple paths can send back ENODEV
146 * when the final path disappears. In this case continuing to fill
147 * the page cache with dirty data which cannot be written out is
148 * evil, so prevent that.
149 */
150 if (unlikely(ioend->io_error == -ENODEV)) {
151 xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
152 __FILE__, __LINE__);
153 }
154
155 xfs_ioend_wake(ip);
156 mempool_free(ioend, xfs_ioend_pool);
157 }
158
159 /*
160 * If the end of the current ioend is beyond the current EOF,
161 * return the new EOF value, otherwise zero.
162 */
163 STATIC xfs_fsize_t
164 xfs_ioend_new_eof(
165 xfs_ioend_t *ioend)
166 {
167 xfs_inode_t *ip = XFS_I(ioend->io_inode);
168 xfs_fsize_t isize;
169 xfs_fsize_t bsize;
170
171 bsize = ioend->io_offset + ioend->io_size;
172 isize = MAX(ip->i_size, ip->i_new_size);
173 isize = MIN(isize, bsize);
174 return isize > ip->i_d.di_size ? isize : 0;
175 }
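
/*
 * Worked example with illustrative numbers: on-disk di_size = 4096,
 * in-core i_size = 8192, no pending i_new_size, and an ioend covering
 * offset 4096 with size 8192. Then bsize = 12288 and
 * isize = min(8192, 12288) = 8192, which is beyond di_size, so 8192 is
 * returned as the new EOF.
 */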
176
177 /*
178 * Update on-disk file size now that data has been written to disk. The
179 * current in-memory file size is i_size. If a write is beyond eof i_new_size
180 * will be the intended file size until i_size is updated. If this write does
181 * not extend all the way to the valid file size then restrict this update to
182 * the end of the write.
183 *
184 * This function does not block as blocking on the inode lock in IO completion
185 * can lead to IO completion order dependency deadlocks.. If it can't get the
186 * inode ilock it will return EAGAIN. Callers must handle this.
187 */
188 STATIC int
189 xfs_setfilesize(
190 xfs_ioend_t *ioend)
191 {
192 xfs_inode_t *ip = XFS_I(ioend->io_inode);
193 xfs_fsize_t isize;
194
195 ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
196 ASSERT(ioend->io_type != IO_READ);
197
198 if (unlikely(ioend->io_error))
199 return 0;
200
201 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
202 return EAGAIN;
203
204 isize = xfs_ioend_new_eof(ioend);
205 if (isize) {
206 ip->i_d.di_size = isize;
207 xfs_mark_inode_dirty(ip);
208 }
209
210 xfs_iunlock(ip, XFS_ILOCK_EXCL);
211 return 0;
212 }
213
214 /*
215  * Schedule IO completion handling on an xfsdatad if this was
216 * the final hold on this ioend. If we are asked to wait,
217 * flush the workqueue.
218 */
219 STATIC void
220 xfs_finish_ioend(
221 xfs_ioend_t *ioend,
222 int wait)
223 {
224 if (atomic_dec_and_test(&ioend->io_remaining)) {
225 struct workqueue_struct *wq;
226
227 wq = (ioend->io_type == IO_UNWRITTEN) ?
228 xfsconvertd_workqueue : xfsdatad_workqueue;
229 queue_work(wq, &ioend->io_work);
230 if (wait)
231 flush_workqueue(wq);
232 }
233 }
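
/*
 * Note: io_remaining acts as a reference count. xfs_alloc_ioend() starts
 * it at 1, xfs_submit_ioend_bio() takes a reference for each bio, and the
 * final drop here is what schedules the completion work.
 */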
234
235 /*
236 * IO write completion.
237 */
238 STATIC void
239 xfs_end_io(
240 struct work_struct *work)
241 {
242 xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work);
243 struct xfs_inode *ip = XFS_I(ioend->io_inode);
244 int error = 0;
245
246 /*
247 * For unwritten extents we need to issue transactions to convert a
248  * range to normal written extents after the data I/O has finished.
249 */
250 if (ioend->io_type == IO_UNWRITTEN &&
251 likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
252
253 error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
254 ioend->io_size);
255 if (error)
256 ioend->io_error = error;
257 }
258
259 /*
260 * We might have to update the on-disk file size after extending
261 * writes.
262 */
263 if (ioend->io_type != IO_READ) {
264 error = xfs_setfilesize(ioend);
265 ASSERT(!error || error == EAGAIN);
266 }
267
268 /*
269 * If we didn't complete processing of the ioend, requeue it to the
270 * tail of the workqueue for another attempt later. Otherwise destroy
271 * it.
272 */
273 if (error == EAGAIN) {
274 atomic_inc(&ioend->io_remaining);
275 xfs_finish_ioend(ioend, 0);
276 /* ensure we don't spin on blocked ioends */
277 delay(1);
278 } else
279 xfs_destroy_ioend(ioend);
280 }
281
282 /*
283 * Allocate and initialise an IO completion structure.
284 * We need to track unwritten extent write completion here initially.
285  * We'll need to extend this for updating the on-disk inode size later
286 * (vs. incore size).
287 */
288 STATIC xfs_ioend_t *
289 xfs_alloc_ioend(
290 struct inode *inode,
291 unsigned int type)
292 {
293 xfs_ioend_t *ioend;
294
295 ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
296
297 /*
298  * Set the count to 1 initially, so that an I/O completion
299  * callback that happens before we have started all the I/O
300  * cannot call the completion routine too early.
301 */
302 atomic_set(&ioend->io_remaining, 1);
303 ioend->io_error = 0;
304 ioend->io_list = NULL;
305 ioend->io_type = type;
306 ioend->io_inode = inode;
307 ioend->io_buffer_head = NULL;
308 ioend->io_buffer_tail = NULL;
309 atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
310 ioend->io_offset = 0;
311 ioend->io_size = 0;
312
313 INIT_WORK(&ioend->io_work, xfs_end_io);
314 return ioend;
315 }
316
317 STATIC int
318 xfs_map_blocks(
319 struct inode *inode,
320 loff_t offset,
321 ssize_t count,
322 struct xfs_bmbt_irec *imap,
323 int flags)
324 {
325 int nmaps = 1;
326 int new = 0;
327
328 return -xfs_iomap(XFS_I(inode), offset, count, flags, imap, &nmaps, &new);
329 }
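
/*
 * Note: xfs_iomap() returns positive XFS error codes; the leading minus
 * converts the result to the negative errno convention used by the
 * generic address_space callers.
 */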
330
331 STATIC int
332 xfs_imap_valid(
333 struct inode *inode,
334 struct xfs_bmbt_irec *imap,
335 xfs_off_t offset)
336 {
337 offset >>= inode->i_blkbits;
338
339 return offset >= imap->br_startoff &&
340 offset < imap->br_startoff + imap->br_blockcount;
341 }
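
/*
 * Example with illustrative numbers: with 4k blocks (i_blkbits = 12), byte
 * offset 20480 is file block 5; a mapping with br_startoff = 4 and
 * br_blockcount = 4 covers blocks 4 to 7, so the offset is inside the
 * mapping and 1 is returned.
 */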
342
343 /*
344 * BIO completion handler for buffered IO.
345 */
346 STATIC void
347 xfs_end_bio(
348 struct bio *bio,
349 int error)
350 {
351 xfs_ioend_t *ioend = bio->bi_private;
352
353 ASSERT(atomic_read(&bio->bi_cnt) >= 1);
354 ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
355
356 /* Toss bio and pass work off to an xfsdatad thread */
357 bio->bi_private = NULL;
358 bio->bi_end_io = NULL;
359 bio_put(bio);
360
361 xfs_finish_ioend(ioend, 0);
362 }
363
364 STATIC void
365 xfs_submit_ioend_bio(
366 struct writeback_control *wbc,
367 xfs_ioend_t *ioend,
368 struct bio *bio)
369 {
370 atomic_inc(&ioend->io_remaining);
371 bio->bi_private = ioend;
372 bio->bi_end_io = xfs_end_bio;
373
374 /*
375 * If the I/O is beyond EOF we mark the inode dirty immediately
376 * but don't update the inode size until I/O completion.
377 */
378 if (xfs_ioend_new_eof(ioend))
379 xfs_mark_inode_dirty(XFS_I(ioend->io_inode));
380
381 submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
382 WRITE_SYNC_PLUG : WRITE, bio);
383 ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
384 bio_put(bio);
385 }
386
387 STATIC struct bio *
388 xfs_alloc_ioend_bio(
389 struct buffer_head *bh)
390 {
391 struct bio *bio;
392 int nvecs = bio_get_nr_vecs(bh->b_bdev);
393
394 do {
395 bio = bio_alloc(GFP_NOIO, nvecs);
396 nvecs >>= 1;
397 } while (!bio);
398
399 ASSERT(bio->bi_private == NULL);
400 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
401 bio->bi_bdev = bh->b_bdev;
402 bio_get(bio);
403 return bio;
404 }
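
/*
 * Note on the loop above: bio_alloc() may fail for a large vector count
 * under memory pressure, so the request is halved on each retry; in the
 * worst case we fall back to a single-vector bio, which the bio mempool
 * is expected to satisfy eventually.
 */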
405
406 STATIC void
407 xfs_start_buffer_writeback(
408 struct buffer_head *bh)
409 {
410 ASSERT(buffer_mapped(bh));
411 ASSERT(buffer_locked(bh));
412 ASSERT(!buffer_delay(bh));
413 ASSERT(!buffer_unwritten(bh));
414
415 mark_buffer_async_write(bh);
416 set_buffer_uptodate(bh);
417 clear_buffer_dirty(bh);
418 }
419
420 STATIC void
421 xfs_start_page_writeback(
422 struct page *page,
423 int clear_dirty,
424 int buffers)
425 {
426 ASSERT(PageLocked(page));
427 ASSERT(!PageWriteback(page));
428 if (clear_dirty)
429 clear_page_dirty_for_io(page);
430 set_page_writeback(page);
431 unlock_page(page);
432 /* If no buffers on the page are to be written, finish it here */
433 if (!buffers)
434 end_page_writeback(page);
435 }
436
437 static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
438 {
439 return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
440 }
441
442 /*
443 * Submit all of the bios for all of the ioends we have saved up, covering the
444 * initial writepage page and also any probed pages.
445 *
446 * Because we may have multiple ioends spanning a page, we need to start
447 * writeback on all the buffers before we submit them for I/O. If we mark the
448  * buffers as we add them, then we can end up with a page that only has some
449  * buffers marked async write, and I/O completion on those can occur before
450  * we mark the remaining buffers async write.
451 *
452 * The end result of this is that we trip a bug in end_page_writeback() because
453 * we call it twice for the one page as the code in end_buffer_async_write()
454 * assumes that all buffers on the page are started at the same time.
455 *
456 * The fix is two passes across the ioend list - one to start writeback on the
457 * buffer_heads, and then submit them for I/O on the second pass.
458 */
459 STATIC void
460 xfs_submit_ioend(
461 struct writeback_control *wbc,
462 xfs_ioend_t *ioend)
463 {
464 xfs_ioend_t *head = ioend;
465 xfs_ioend_t *next;
466 struct buffer_head *bh;
467 struct bio *bio;
468 sector_t lastblock = 0;
469
470 /* Pass 1 - start writeback */
471 do {
472 next = ioend->io_list;
473 for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
474 xfs_start_buffer_writeback(bh);
475 }
476 } while ((ioend = next) != NULL);
477
478 /* Pass 2 - submit I/O */
479 ioend = head;
480 do {
481 next = ioend->io_list;
482 bio = NULL;
483
484 for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
485
486 if (!bio) {
487 retry:
488 bio = xfs_alloc_ioend_bio(bh);
489 } else if (bh->b_blocknr != lastblock + 1) {
490 xfs_submit_ioend_bio(wbc, ioend, bio);
491 goto retry;
492 }
493
494 if (bio_add_buffer(bio, bh) != bh->b_size) {
495 xfs_submit_ioend_bio(wbc, ioend, bio);
496 goto retry;
497 }
498
499 lastblock = bh->b_blocknr;
500 }
501 if (bio)
502 xfs_submit_ioend_bio(wbc, ioend, bio);
503 xfs_finish_ioend(ioend, 0);
504 } while ((ioend = next) != NULL);
505 }
506
507 /*
508  * Cancel submission of all buffer_heads so far in this ioend.
509  * Toss the ioend too. Only ever called for the initial page
510 * in a writepage request, so only ever one page.
511 */
512 STATIC void
513 xfs_cancel_ioend(
514 xfs_ioend_t *ioend)
515 {
516 xfs_ioend_t *next;
517 struct buffer_head *bh, *next_bh;
518
519 do {
520 next = ioend->io_list;
521 bh = ioend->io_buffer_head;
522 do {
523 next_bh = bh->b_private;
524 clear_buffer_async_write(bh);
525 unlock_buffer(bh);
526 } while ((bh = next_bh) != NULL);
527
528 xfs_ioend_wake(XFS_I(ioend->io_inode));
529 mempool_free(ioend, xfs_ioend_pool);
530 } while ((ioend = next) != NULL);
531 }
532
533 /*
534 * Test to see if we've been building up a completion structure for
535 * earlier buffers -- if so, we try to append to this ioend if we
536 * can, otherwise we finish off any current ioend and start another.
537  * The current ioend is returned to the caller via *result.
538 */
539 STATIC void
540 xfs_add_to_ioend(
541 struct inode *inode,
542 struct buffer_head *bh,
543 xfs_off_t offset,
544 unsigned int type,
545 xfs_ioend_t **result,
546 int need_ioend)
547 {
548 xfs_ioend_t *ioend = *result;
549
550 if (!ioend || need_ioend || type != ioend->io_type) {
551 xfs_ioend_t *previous = *result;
552
553 ioend = xfs_alloc_ioend(inode, type);
554 ioend->io_offset = offset;
555 ioend->io_buffer_head = bh;
556 ioend->io_buffer_tail = bh;
557 if (previous)
558 previous->io_list = ioend;
559 *result = ioend;
560 } else {
561 ioend->io_buffer_tail->b_private = bh;
562 ioend->io_buffer_tail = bh;
563 }
564
565 bh->b_private = NULL;
566 ioend->io_size += bh->b_size;
567 }
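
/*
 * Note: the buffer_heads attached to an ioend are chained through their
 * b_private pointers, with io_buffer_head and io_buffer_tail tracking the
 * list ends, while consecutive ioends built for one writepage call are
 * themselves chained through io_list.
 */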
568
569 STATIC void
570 xfs_map_buffer(
571 struct inode *inode,
572 struct buffer_head *bh,
573 struct xfs_bmbt_irec *imap,
574 xfs_off_t offset)
575 {
576 sector_t bn;
577 struct xfs_mount *m = XFS_I(inode)->i_mount;
578 xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
579 xfs_daddr_t iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);
580
581 ASSERT(imap->br_startblock != HOLESTARTBLOCK);
582 ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
583
584 bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
585 ((offset - iomap_offset) >> inode->i_blkbits);
586
587 ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));
588
589 bh->b_blocknr = bn;
590 set_buffer_mapped(bh);
591 }
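
/*
 * Worked example with illustrative numbers: with 512-byte basic blocks
 * (BBSHIFT = 9) and 4k filesystem blocks (i_blkbits = 12), a mapping whose
 * start converts to daddr 80 yields 80 >> 3 = 10 as the first block, and a
 * buffer 8192 bytes past iomap_offset adds 8192 >> 12 = 2, giving
 * b_blocknr = 12.
 */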
592
593 STATIC void
594 xfs_map_at_offset(
595 struct inode *inode,
596 struct buffer_head *bh,
597 struct xfs_bmbt_irec *imap,
598 xfs_off_t offset)
599 {
600 ASSERT(imap->br_startblock != HOLESTARTBLOCK);
601 ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
602
603 lock_buffer(bh);
604 xfs_map_buffer(inode, bh, imap, offset);
605 bh->b_bdev = xfs_find_bdev_for_inode(inode);
606 set_buffer_mapped(bh);
607 clear_buffer_delay(bh);
608 clear_buffer_unwritten(bh);
609 }
610
611 /*
612 * Look for a page at index that is suitable for clustering.
613 */
614 STATIC unsigned int
615 xfs_probe_page(
616 struct page *page,
617 unsigned int pg_offset,
618 int mapped)
619 {
620 int ret = 0;
621
622 if (PageWriteback(page))
623 return 0;
624
625 if (page->mapping && PageDirty(page)) {
626 if (page_has_buffers(page)) {
627 struct buffer_head *bh, *head;
628
629 bh = head = page_buffers(page);
630 do {
631 if (!buffer_uptodate(bh))
632 break;
633 if (mapped != buffer_mapped(bh))
634 break;
635 ret += bh->b_size;
636 if (ret >= pg_offset)
637 break;
638 } while ((bh = bh->b_this_page) != head);
639 } else
640 ret = mapped ? 0 : PAGE_CACHE_SIZE;
641 }
642
643 return ret;
644 }
645
646 STATIC size_t
647 xfs_probe_cluster(
648 struct inode *inode,
649 struct page *startpage,
650 struct buffer_head *bh,
651 struct buffer_head *head,
652 int mapped)
653 {
654 struct pagevec pvec;
655 pgoff_t tindex, tlast, tloff;
656 size_t total = 0;
657 int done = 0, i;
658
659 /* First sum forwards in this page */
660 do {
661 if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
662 return total;
663 total += bh->b_size;
664 } while ((bh = bh->b_this_page) != head);
665
666 /* if we reached the end of the page, sum forwards in following pages */
667 tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
668 tindex = startpage->index + 1;
669
670 /* Prune this back to avoid pathological behavior */
671 tloff = min(tlast, startpage->index + 64);
672
673 pagevec_init(&pvec, 0);
674 while (!done && tindex <= tloff) {
675 unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
676
677 if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
678 break;
679
680 for (i = 0; i < pagevec_count(&pvec); i++) {
681 struct page *page = pvec.pages[i];
682 size_t pg_offset, pg_len = 0;
683
684 if (tindex == tlast) {
685 pg_offset =
686 i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
687 if (!pg_offset) {
688 done = 1;
689 break;
690 }
691 } else
692 pg_offset = PAGE_CACHE_SIZE;
693
694 if (page->index == tindex && trylock_page(page)) {
695 pg_len = xfs_probe_page(page, pg_offset, mapped);
696 unlock_page(page);
697 }
698
699 if (!pg_len) {
700 done = 1;
701 break;
702 }
703
704 total += pg_len;
705 tindex++;
706 }
707
708 pagevec_release(&pvec);
709 cond_resched();
710 }
711
712 return total;
713 }
714
715 /*
716 * Test if a given page is suitable for writing as part of an unwritten
717 * or delayed allocate extent.
718 */
719 STATIC int
720 xfs_is_delayed_page(
721 struct page *page,
722 unsigned int type)
723 {
724 if (PageWriteback(page))
725 return 0;
726
727 if (page->mapping && page_has_buffers(page)) {
728 struct buffer_head *bh, *head;
729 int acceptable = 0;
730
731 bh = head = page_buffers(page);
732 do {
733 if (buffer_unwritten(bh))
734 acceptable = (type == IO_UNWRITTEN);
735 else if (buffer_delay(bh))
736 acceptable = (type == IO_DELAY);
737 else if (buffer_dirty(bh) && buffer_mapped(bh))
738 acceptable = (type == IO_NEW);
739 else
740 break;
741 } while ((bh = bh->b_this_page) != head);
742
743 if (acceptable)
744 return 1;
745 }
746
747 return 0;
748 }
749
750 /*
751 * Allocate & map buffers for page given the extent map. Write it out.
752  * Except for the original page of a writepage, this is called on
753  * delalloc/unwritten pages only; for the original page it is possible
754  * that the page has no mapping at all.
755 */
756 STATIC int
757 xfs_convert_page(
758 struct inode *inode,
759 struct page *page,
760 loff_t tindex,
761 struct xfs_bmbt_irec *imap,
762 xfs_ioend_t **ioendp,
763 struct writeback_control *wbc,
764 int startio,
765 int all_bh)
766 {
767 struct buffer_head *bh, *head;
768 xfs_off_t end_offset;
769 unsigned long p_offset;
770 unsigned int type;
771 int len, page_dirty;
772 int count = 0, done = 0, uptodate = 1;
773 xfs_off_t offset = page_offset(page);
774
775 if (page->index != tindex)
776 goto fail;
777 if (!trylock_page(page))
778 goto fail;
779 if (PageWriteback(page))
780 goto fail_unlock_page;
781 if (page->mapping != inode->i_mapping)
782 goto fail_unlock_page;
783 if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
784 goto fail_unlock_page;
785
786 /*
787 * page_dirty is initially a count of buffers on the page before
788 * EOF and is decremented as we move each into a cleanable state.
789 *
790 * Derivation:
791 *
792 * End offset is the highest offset that this page should represent.
793 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
794 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
795 * hence give us the correct page_dirty count. On any other page,
796 * it will be zero and in that case we need page_dirty to be the
797 * count of buffers on the page.
798 */
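/*
 * For example, with illustrative numbers: 4k pages, 1k blocks (len = 1024)
 * and i_size ending 2500 bytes into this page give p_offset =
 * roundup(2500, 1024) = 3072 and page_dirty = 3; on a non-final page
 * p_offset is PAGE_CACHE_SIZE and page_dirty starts at 4.
 */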
799 end_offset = min_t(unsigned long long,
800 (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
801 i_size_read(inode));
802
803 len = 1 << inode->i_blkbits;
804 p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
805 PAGE_CACHE_SIZE);
806 p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
807 page_dirty = p_offset / len;
808
809 bh = head = page_buffers(page);
810 do {
811 if (offset >= end_offset)
812 break;
813 if (!buffer_uptodate(bh))
814 uptodate = 0;
815 if (!(PageUptodate(page) || buffer_uptodate(bh))) {
816 done = 1;
817 continue;
818 }
819
820 if (buffer_unwritten(bh) || buffer_delay(bh)) {
821 if (buffer_unwritten(bh))
822 type = IO_UNWRITTEN;
823 else
824 type = IO_DELAY;
825
826 if (!xfs_imap_valid(inode, imap, offset)) {
827 done = 1;
828 continue;
829 }
830
831 ASSERT(imap->br_startblock != HOLESTARTBLOCK);
832 ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
833
834 xfs_map_at_offset(inode, bh, imap, offset);
835 if (startio) {
836 xfs_add_to_ioend(inode, bh, offset,
837 type, ioendp, done);
838 } else {
839 set_buffer_dirty(bh);
840 unlock_buffer(bh);
841 mark_buffer_dirty(bh);
842 }
843 page_dirty--;
844 count++;
845 } else {
846 type = IO_NEW;
847 if (buffer_mapped(bh) && all_bh && startio) {
848 lock_buffer(bh);
849 xfs_add_to_ioend(inode, bh, offset,
850 type, ioendp, done);
851 count++;
852 page_dirty--;
853 } else {
854 done = 1;
855 }
856 }
857 } while (offset += len, (bh = bh->b_this_page) != head);
858
859 if (uptodate && bh == head)
860 SetPageUptodate(page);
861
862 if (startio) {
863 if (count) {
864 wbc->nr_to_write--;
865 if (wbc->nr_to_write <= 0)
866 done = 1;
867 }
868 xfs_start_page_writeback(page, !page_dirty, count);
869 }
870
871 return done;
872 fail_unlock_page:
873 unlock_page(page);
874 fail:
875 return 1;
876 }
877
878 /*
879 * Convert & write out a cluster of pages in the same extent as defined
880 * by mp and following the start page.
881 */
882 STATIC void
883 xfs_cluster_write(
884 struct inode *inode,
885 pgoff_t tindex,
886 struct xfs_bmbt_irec *imap,
887 xfs_ioend_t **ioendp,
888 struct writeback_control *wbc,
889 int startio,
890 int all_bh,
891 pgoff_t tlast)
892 {
893 struct pagevec pvec;
894 int done = 0, i;
895
896 pagevec_init(&pvec, 0);
897 while (!done && tindex <= tlast) {
898 unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
899
900 if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
901 break;
902
903 for (i = 0; i < pagevec_count(&pvec); i++) {
904 done = xfs_convert_page(inode, pvec.pages[i], tindex++,
905 imap, ioendp, wbc, startio, all_bh);
906 if (done)
907 break;
908 }
909
910 pagevec_release(&pvec);
911 cond_resched();
912 }
913 }
914
915 STATIC void
916 xfs_vm_invalidatepage(
917 struct page *page,
918 unsigned long offset)
919 {
920 trace_xfs_invalidatepage(page->mapping->host, page, offset);
921 block_invalidatepage(page, offset);
922 }
923
924 /*
925 * If the page has delalloc buffers on it, we need to punch them out before we
926 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
927 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
928 * is done on that same region - the delalloc extent is returned when none is
929 * supposed to be there.
930 *
931 * We prevent this by truncating away the delalloc regions on the page before
932 * invalidating it. Because they are delalloc, we can do this without needing a
933 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
934 * truncation without a transaction as there is no space left for block
935  * reservation (typically why we see an ENOSPC in writeback).
936 *
937 * This is not a performance critical path, so for now just do the punching a
938 * buffer head at a time.
939 */
940 STATIC void
941 xfs_aops_discard_page(
942 struct page *page)
943 {
944 struct inode *inode = page->mapping->host;
945 struct xfs_inode *ip = XFS_I(inode);
946 struct buffer_head *bh, *head;
947 loff_t offset = page_offset(page);
948 ssize_t len = 1 << inode->i_blkbits;
949
950 if (!xfs_is_delayed_page(page, IO_DELAY))
951 goto out_invalidate;
952
953 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
954 goto out_invalidate;
955
956 xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
957 "page discard on page %p, inode 0x%llx, offset %llu.",
958 page, ip->i_ino, offset);
959
960 xfs_ilock(ip, XFS_ILOCK_EXCL);
961 bh = head = page_buffers(page);
962 do {
963 int done;
964 xfs_fileoff_t offset_fsb;
965 xfs_bmbt_irec_t imap;
966 int nimaps = 1;
967 int error;
968 xfs_fsblock_t firstblock;
969 xfs_bmap_free_t flist;
970
971 if (!buffer_delay(bh))
972 goto next_buffer;
973
974 offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
975
976 /*
977 * Map the range first and check that it is a delalloc extent
978 * before trying to unmap the range. Otherwise we will be
979 * trying to remove a real extent (which requires a
980 * transaction) or a hole, which is probably a bad idea...
981 */
982 error = xfs_bmapi(NULL, ip, offset_fsb, 1,
983 XFS_BMAPI_ENTIRE, NULL, 0, &imap,
984 &nimaps, NULL, NULL);
985
986 if (error) {
987 /* something screwed, just bail */
988 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
989 xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
990 "page discard failed delalloc mapping lookup.");
991 }
992 break;
993 }
994 if (!nimaps) {
995 /* nothing there */
996 goto next_buffer;
997 }
998 if (imap.br_startblock != DELAYSTARTBLOCK) {
999 /* been converted, ignore */
1000 goto next_buffer;
1001 }
1002 WARN_ON(imap.br_blockcount == 0);
1003
1004 /*
1005 * Note: while we initialise the firstblock/flist pair, they
1006  * should never be used, because blocks should never be
1007  * allocated or freed for a delalloc extent; hence we don't
1008  * need to cancel or finish them after the xfs_bunmapi() call.
1009 */
1010 xfs_bmap_init(&flist, &firstblock);
1011 error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock,
1012 &flist, NULL, &done);
1013
1014 ASSERT(!flist.xbf_count && !flist.xbf_first);
1015 if (error) {
1016 /* something screwed, just bail */
1017 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1018 xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
1019 "page discard unable to remove delalloc mapping.");
1020 }
1021 break;
1022 }
1023 next_buffer:
1024 offset += len;
1025
1026 } while ((bh = bh->b_this_page) != head);
1027
1028 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1029 out_invalidate:
1030 xfs_vm_invalidatepage(page, 0);
1031 return;
1032 }
1033
1034 /*
1035 * Calling this without startio set means we are being asked to make a dirty
1036  * page ready for freeing its buffers. When called with startio set then
1037  * we are coming from writepage.
1038  *
1039  * When called with startio set it is important that we write the WHOLE
1040  * page if possible.
1041  * The bh->b_state flags cannot tell us whether any of the blocks, or which
1042  * block for that matter, are dirty due to mmap writes, and therefore bh
1043  * uptodate is only valid if the page itself isn't completely uptodate. Some
1044  * layers may clear the page dirty flag prior to calling writepage, under the
1045  * assumption the entire page will be written out; by not writing out the
1046  * whole page the page can be reused before all valid dirty data is
1047  * written out. Note: in the case of a page that has been dirtied by an mmap
1048  * write but only partially set up by block_prepare_write, the bh->b_state
1049  * flags will not agree; only the ones set up by BPW/BCW will have valid
1050  * state, hence the whole page must be written out.
1051 */
1052
1053 STATIC int
1054 xfs_page_state_convert(
1055 struct inode *inode,
1056 struct page *page,
1057 struct writeback_control *wbc,
1058 int startio,
1059 int unmapped) /* also implies page uptodate */
1060 {
1061 struct buffer_head *bh, *head;
1062 struct xfs_bmbt_irec imap;
1063 xfs_ioend_t *ioend = NULL, *iohead = NULL;
1064 loff_t offset;
1065 unsigned long p_offset = 0;
1066 unsigned int type;
1067 __uint64_t end_offset;
1068 pgoff_t end_index, last_index, tlast;
1069 ssize_t size, len;
1070 int flags, err, imap_valid = 0, uptodate = 1;
1071 int page_dirty, count = 0;
1072 int trylock = 0;
1073 int all_bh = unmapped;
1074
1075 if (startio) {
1076 if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
1077 trylock |= BMAPI_TRYLOCK;
1078 }
1079
1080 /* Is this page beyond the end of the file? */
1081 offset = i_size_read(inode);
1082 end_index = offset >> PAGE_CACHE_SHIFT;
1083 last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
1084 if (page->index >= end_index) {
1085 if ((page->index >= end_index + 1) ||
1086 !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
1087 if (startio)
1088 unlock_page(page);
1089 return 0;
1090 }
1091 }
1092
1093 /*
1094 * page_dirty is initially a count of buffers on the page before
1095 * EOF and is decremented as we move each into a cleanable state.
1096 *
1097 * Derivation:
1098 *
1099 * End offset is the highest offset that this page should represent.
1100 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
1101 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
1102 * hence give us the correct page_dirty count. On any other page,
1103 * it will be zero and in that case we need page_dirty to be the
1104 * count of buffers on the page.
1105 */
1106 end_offset = min_t(unsigned long long,
1107 (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
1108 len = 1 << inode->i_blkbits;
1109 p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
1110 PAGE_CACHE_SIZE);
1111 p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
1112 page_dirty = p_offset / len;
1113
1114 bh = head = page_buffers(page);
1115 offset = page_offset(page);
1116 flags = BMAPI_READ;
1117 type = IO_NEW;
1118
1119 /* TODO: cleanup count and page_dirty */
1120
1121 do {
1122 if (offset >= end_offset)
1123 break;
1124 if (!buffer_uptodate(bh))
1125 uptodate = 0;
1126 if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
1127 /*
1128 * the iomap is actually still valid, but the ioend
1129  * isn't. This shouldn't happen too often.
1130 */
1131 imap_valid = 0;
1132 continue;
1133 }
1134
1135 if (imap_valid)
1136 imap_valid = xfs_imap_valid(inode, &imap, offset);
1137
1138 /*
1139 * First case, map an unwritten extent and prepare for
1140 * extent state conversion transaction on completion.
1141 *
1142 * Second case, allocate space for a delalloc buffer.
1143 * We can return EAGAIN here in the release page case.
1144 *
1145 * Third case, an unmapped buffer was found, and we are
1146 * in a path where we need to write the whole page out.
1147 */
1148 if (buffer_unwritten(bh) || buffer_delay(bh) ||
1149 ((buffer_uptodate(bh) || PageUptodate(page)) &&
1150 !buffer_mapped(bh) && (unmapped || startio))) {
1151 int new_ioend = 0;
1152
1153 /*
1154 * Make sure we don't use a read-only iomap
1155 */
1156 if (flags == BMAPI_READ)
1157 imap_valid = 0;
1158
1159 if (buffer_unwritten(bh)) {
1160 type = IO_UNWRITTEN;
1161 flags = BMAPI_WRITE | BMAPI_IGNSTATE;
1162 } else if (buffer_delay(bh)) {
1163 type = IO_DELAY;
1164 flags = BMAPI_ALLOCATE | trylock;
1165 } else {
1166 type = IO_NEW;
1167 flags = BMAPI_WRITE | BMAPI_MMAP;
1168 }
1169
1170 if (!imap_valid) {
1171 /*
1172 * if we didn't have a valid mapping then we
1173 * need to ensure that we put the new mapping
1174 * in a new ioend structure. This needs to be
1175 * done to ensure that the ioends correctly
1176 * reflect the block mappings at io completion
1177 * for unwritten extent conversion.
1178 */
1179 new_ioend = 1;
1180 if (type == IO_NEW) {
1181 size = xfs_probe_cluster(inode,
1182 page, bh, head, 0);
1183 } else {
1184 size = len;
1185 }
1186
1187 err = xfs_map_blocks(inode, offset, size,
1188 &imap, flags);
1189 if (err)
1190 goto error;
1191 imap_valid = xfs_imap_valid(inode, &imap,
1192 offset);
1193 }
1194 if (imap_valid) {
1195 xfs_map_at_offset(inode, bh, &imap, offset);
1196 if (startio) {
1197 xfs_add_to_ioend(inode, bh, offset,
1198 type, &ioend,
1199 new_ioend);
1200 } else {
1201 set_buffer_dirty(bh);
1202 unlock_buffer(bh);
1203 mark_buffer_dirty(bh);
1204 }
1205 page_dirty--;
1206 count++;
1207 }
1208 } else if (buffer_uptodate(bh) && startio) {
1209 /*
1210 * we got here because the buffer is already mapped.
1211 * That means it must already have extents allocated
1212 * underneath it. Map the extent by reading it.
1213 */
1214 if (!imap_valid || flags != BMAPI_READ) {
1215 flags = BMAPI_READ;
1216 size = xfs_probe_cluster(inode, page, bh,
1217 head, 1);
1218 err = xfs_map_blocks(inode, offset, size,
1219 &imap, flags);
1220 if (err)
1221 goto error;
1222 imap_valid = xfs_imap_valid(inode, &imap,
1223 offset);
1224 }
1225
1226 /*
1227 * We set the type to IO_NEW in case we are doing a
1228 * small write at EOF that is extending the file but
1229 * without needing an allocation. We need to update the
1230 * file size on I/O completion in this case so it is
1231 * the same case as having just allocated a new extent
1232 * that we are writing into for the first time.
1233 */
1234 type = IO_NEW;
1235 if (trylock_buffer(bh)) {
1236 ASSERT(buffer_mapped(bh));
1237 if (imap_valid)
1238 all_bh = 1;
1239 xfs_add_to_ioend(inode, bh, offset, type,
1240 &ioend, !imap_valid);
1241 page_dirty--;
1242 count++;
1243 } else {
1244 imap_valid = 0;
1245 }
1246 } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
1247 (unmapped || startio)) {
1248 imap_valid = 0;
1249 }
1250
1251 if (!iohead)
1252 iohead = ioend;
1253
1254 } while (offset += len, ((bh = bh->b_this_page) != head));
1255
1256 if (uptodate && bh == head)
1257 SetPageUptodate(page);
1258
1259 if (startio)
1260 xfs_start_page_writeback(page, 1, count);
1261
1262 if (ioend && imap_valid) {
1263 struct xfs_mount *m = XFS_I(inode)->i_mount;
1264 xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap.br_startoff);
1265 xfs_off_t iomap_bsize = XFS_FSB_TO_B(m, imap.br_blockcount);
1266
1267 offset = (iomap_offset + iomap_bsize - 1) >>
1268 PAGE_CACHE_SHIFT;
1269 tlast = min_t(pgoff_t, offset, last_index);
1270 xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
1271 wbc, startio, all_bh, tlast);
1272 }
1273
1274 if (iohead)
1275 xfs_submit_ioend(wbc, iohead);
1276
1277 return page_dirty;
1278
1279 error:
1280 if (iohead)
1281 xfs_cancel_ioend(iohead);
1282
1283 /*
1284 * If it's delalloc and we have nowhere to put it,
1285 * throw it away, unless the lower layers told
1286 * us to try again.
1287 */
1288 if (err != -EAGAIN) {
1289 if (!unmapped)
1290 xfs_aops_discard_page(page);
1291 ClearPageUptodate(page);
1292 }
1293 return err;
1294 }
1295
1296 /*
1297 * writepage: Called from one of two places:
1298 *
1299 * 1. we are flushing a delalloc buffer head.
1300 *
1301 * 2. we are writing out a dirty page. Typically the page dirty
1302  * state is cleared before we get here. In this case it is
1303 * conceivable we have no buffer heads.
1304 *
1305 * For delalloc space on the page we need to allocate space and
1306 * flush it. For unmapped buffer heads on the page we should
1307 * allocate space if the page is uptodate. For any other dirty
1308 * buffer heads on the page we should flush them.
1309 *
1310 * If we detect that a transaction would be required to flush
1311  * the page, we have to check the process flags first: if we
1312  * are already in a transaction, or disk I/O during allocation
1313  * is disallowed, we need to fail the writepage and redirty the page.
1314 */
1315
1316 STATIC int
1317 xfs_vm_writepage(
1318 struct page *page,
1319 struct writeback_control *wbc)
1320 {
1321 int error;
1322 int need_trans;
1323 int delalloc, unmapped, unwritten;
1324 struct inode *inode = page->mapping->host;
1325
1326 trace_xfs_writepage(inode, page, 0);
1327
1328 /*
1329 * We need a transaction if:
1330 * 1. There are delalloc buffers on the page
1331 * 2. The page is uptodate and we have unmapped buffers
1332 * 3. The page is uptodate and we have no buffers
1333 * 4. There are unwritten buffers on the page
1334 */
1335
1336 if (!page_has_buffers(page)) {
1337 unmapped = 1;
1338 need_trans = 1;
1339 } else {
1340 xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
1341 if (!PageUptodate(page))
1342 unmapped = 0;
1343 need_trans = delalloc + unmapped + unwritten;
1344 }
1345
1346 /*
1347 * If we need a transaction and the process flags say
1348  * we are already in a transaction, or no IO is allowed,
1349 * then mark the page dirty again and leave the page
1350 * as is.
1351 */
1352 if (current_test_flags(PF_FSTRANS) && need_trans)
1353 goto out_fail;
1354
1355 /*
1356 * Delay hooking up buffer heads until we have
1357 * made our go/no-go decision.
1358 */
1359 if (!page_has_buffers(page))
1360 create_empty_buffers(page, 1 << inode->i_blkbits, 0);
1361
1362
1363 /*
1364 * VM calculation for nr_to_write seems off. Bump it way
1365 * up, this gets simple streaming writes zippy again.
1366 * To be reviewed again after Jens' writeback changes.
1367 */
1368 wbc->nr_to_write *= 4;
1369
1370 /*
1371 * Convert delayed allocate, unwritten or unmapped space
1372 * to real space and flush out to disk.
1373 */
1374 error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
1375 if (error == -EAGAIN)
1376 goto out_fail;
1377 if (unlikely(error < 0))
1378 goto out_unlock;
1379
1380 return 0;
1381
1382 out_fail:
1383 redirty_page_for_writepage(wbc, page);
1384 unlock_page(page);
1385 return 0;
1386 out_unlock:
1387 unlock_page(page);
1388 return error;
1389 }
1390
1391 STATIC int
1392 xfs_vm_writepages(
1393 struct address_space *mapping,
1394 struct writeback_control *wbc)
1395 {
1396 xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
1397 return generic_writepages(mapping, wbc);
1398 }
1399
1400 /*
1401 * Called to move a page into cleanable state - and from there
1402 * to be released. Possibly the page is already clean. We always
1403 * have buffer heads in this call.
1404 *
1405 * Returns 0 if the page is ok to release, 1 otherwise.
1406 *
1407 * Possible scenarios are:
1408 *
1409 * 1. We are being called to release a page which has been written
1410  * to via regular I/O. Buffer heads will be dirty and possibly
1411  * delalloc. If there are no delalloc buffer heads in this case
1412  * then we can just return zero.
1413 *
1414 * 2. We are called to release a page which has been written via
1415 * mmap, all we need to do is ensure there is no delalloc
1416 * state in the buffer heads, if not we can let the caller
1417 * free them and we should come back later via writepage.
1418 */
1419 STATIC int
1420 xfs_vm_releasepage(
1421 struct page *page,
1422 gfp_t gfp_mask)
1423 {
1424 struct inode *inode = page->mapping->host;
1425 int dirty, delalloc, unmapped, unwritten;
1426 struct writeback_control wbc = {
1427 .sync_mode = WB_SYNC_ALL,
1428 .nr_to_write = 1,
1429 };
1430
1431 trace_xfs_releasepage(inode, page, 0);
1432
1433 if (!page_has_buffers(page))
1434 return 0;
1435
1436 xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
1437 if (!delalloc && !unwritten)
1438 goto free_buffers;
1439
1440 if (!(gfp_mask & __GFP_FS))
1441 return 0;
1442
1443 /* If we are already inside a transaction or the thread cannot
1444 * do I/O, we cannot release this page.
1445 */
1446 if (current_test_flags(PF_FSTRANS))
1447 return 0;
1448
1449 /*
1450 * Convert delalloc space to real space, do not flush the
1451 * data out to disk, that will be done by the caller.
1452 * Never need to allocate space here - we will always
1453 * come back to writepage in that case.
1454 */
1455 dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
1456 if (dirty == 0 && !unwritten)
1457 goto free_buffers;
1458 return 0;
1459
1460 free_buffers:
1461 return try_to_free_buffers(page);
1462 }
1463
1464 STATIC int
1465 __xfs_get_blocks(
1466 struct inode *inode,
1467 sector_t iblock,
1468 struct buffer_head *bh_result,
1469 int create,
1470 int direct,
1471 bmapi_flags_t flags)
1472 {
1473 struct xfs_bmbt_irec imap;
1474 xfs_off_t offset;
1475 ssize_t size;
1476 int nimap = 1;
1477 int new = 0;
1478 int error;
1479
1480 offset = (xfs_off_t)iblock << inode->i_blkbits;
1481 ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1482 size = bh_result->b_size;
1483
1484 if (!create && direct && offset >= i_size_read(inode))
1485 return 0;
1486
1487 error = xfs_iomap(XFS_I(inode), offset, size,
1488 create ? flags : BMAPI_READ, &imap, &nimap, &new);
1489 if (error)
1490 return -error;
1491 if (nimap == 0)
1492 return 0;
1493
1494 if (imap.br_startblock != HOLESTARTBLOCK &&
1495 imap.br_startblock != DELAYSTARTBLOCK) {
1496 /*
1497 * For unwritten extents do not report a disk address on
1498 * the read case (treat as if we're reading into a hole).
1499 */
1500 if (create || !ISUNWRITTEN(&imap))
1501 xfs_map_buffer(inode, bh_result, &imap, offset);
1502 if (create && ISUNWRITTEN(&imap)) {
1503 if (direct)
1504 bh_result->b_private = inode;
1505 set_buffer_unwritten(bh_result);
1506 }
1507 }
1508
1509 /*
1510  * If this is a realtime file, data may be on a different device
1511  * to that currently pointed to by the buffer_head's b_bdev.
1512 */
1513 bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
1514
1515 /*
1516 * If we previously allocated a block out beyond eof and we are now
1517 * coming back to use it then we will need to flag it as new even if it
1518 * has a disk address.
1519 *
1520 * With sub-block writes into unwritten extents we also need to mark
1521  * the buffer as new so that the unwritten parts of the buffer get
1522 * correctly zeroed.
1523 */
1524 if (create &&
1525 ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
1526 (offset >= i_size_read(inode)) ||
1527 (new || ISUNWRITTEN(&imap))))
1528 set_buffer_new(bh_result);
1529
1530 if (imap.br_startblock == DELAYSTARTBLOCK) {
1531 BUG_ON(direct);
1532 if (create) {
1533 set_buffer_uptodate(bh_result);
1534 set_buffer_mapped(bh_result);
1535 set_buffer_delay(bh_result);
1536 }
1537 }
1538
1539 if (direct || size > (1 << inode->i_blkbits)) {
1540 struct xfs_mount *mp = XFS_I(inode)->i_mount;
1541 xfs_off_t iomap_offset = XFS_FSB_TO_B(mp, imap.br_startoff);
1542 xfs_off_t iomap_delta = offset - iomap_offset;
1543 xfs_off_t iomap_bsize = XFS_FSB_TO_B(mp, imap.br_blockcount);
1544
1545 ASSERT(iomap_bsize - iomap_delta > 0);
1546 offset = min_t(xfs_off_t,
1547 iomap_bsize - iomap_delta, size);
1548 bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
1549 }
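
/*
 * Example with illustrative numbers: a 1 MB direct I/O request whose
 * mapping only covers 256k past this offset gets b_size trimmed to 256k
 * here, and the generic direct I/O code maps the remainder with further
 * get_blocks calls.
 */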
1550
1551 return 0;
1552 }
1553
1554 int
1555 xfs_get_blocks(
1556 struct inode *inode,
1557 sector_t iblock,
1558 struct buffer_head *bh_result,
1559 int create)
1560 {
1561 return __xfs_get_blocks(inode, iblock,
1562 bh_result, create, 0, BMAPI_WRITE);
1563 }
1564
1565 STATIC int
1566 xfs_get_blocks_direct(
1567 struct inode *inode,
1568 sector_t iblock,
1569 struct buffer_head *bh_result,
1570 int create)
1571 {
1572 return __xfs_get_blocks(inode, iblock,
1573 bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
1574 }
1575
1576 STATIC void
1577 xfs_end_io_direct(
1578 struct kiocb *iocb,
1579 loff_t offset,
1580 ssize_t size,
1581 void *private)
1582 {
1583 xfs_ioend_t *ioend = iocb->private;
1584
1585 /*
1586 * Non-NULL private data means we need to issue a transaction to
1587 * convert a range from unwritten to written extents. This needs
1588 * to happen from process context but aio+dio I/O completion
1589 * happens from irq context so we need to defer it to a workqueue.
1590 * This is not necessary for synchronous direct I/O, but we do
1591 * it anyway to keep the code uniform and simpler.
1592 *
1593 * Well, if only it were that simple. Because synchronous direct I/O
1594 * requires extent conversion to occur *before* we return to userspace,
1595 * we have to wait for extent conversion to complete. Look at the
1596 * iocb that has been passed to us to determine if this is AIO or
1597 * not. If it is synchronous, tell xfs_finish_ioend() to kick the
1598 * workqueue and wait for it to complete.
1599 *
1600 * The core direct I/O code might be changed to always call the
1601 * completion handler in the future, in which case all this can
1602 * go away.
1603 */
1604 ioend->io_offset = offset;
1605 ioend->io_size = size;
1606 if (ioend->io_type == IO_READ) {
1607 xfs_finish_ioend(ioend, 0);
1608 } else if (private && size > 0) {
1609 xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
1610 } else {
1611 /*
1612  * A direct I/O write ioend starts its life in unwritten
1613  * state in case it maps an unwritten extent. This write
1614  * didn't map an unwritten extent, so switch its completion
1615 * handler.
1616 */
1617 ioend->io_type = IO_NEW;
1618 xfs_finish_ioend(ioend, 0);
1619 }
1620
1621 /*
1622 * blockdev_direct_IO can return an error even after the I/O
1623 * completion handler was called. Thus we need to protect
1624 * against double-freeing.
1625 */
1626 iocb->private = NULL;
1627 }
1628
1629 STATIC ssize_t
1630 xfs_vm_direct_IO(
1631 int rw,
1632 struct kiocb *iocb,
1633 const struct iovec *iov,
1634 loff_t offset,
1635 unsigned long nr_segs)
1636 {
1637 struct file *file = iocb->ki_filp;
1638 struct inode *inode = file->f_mapping->host;
1639 struct block_device *bdev;
1640 ssize_t ret;
1641
1642 bdev = xfs_find_bdev_for_inode(inode);
1643
1644 iocb->private = xfs_alloc_ioend(inode, rw == WRITE ?
1645 IO_UNWRITTEN : IO_READ);
1646
1647 ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
1648 offset, nr_segs,
1649 xfs_get_blocks_direct,
1650 xfs_end_io_direct);
1651
1652 if (unlikely(ret != -EIOCBQUEUED && iocb->private))
1653 xfs_destroy_ioend(iocb->private);
1654 return ret;
1655 }
1656
1657 STATIC int
1658 xfs_vm_write_begin(
1659 struct file *file,
1660 struct address_space *mapping,
1661 loff_t pos,
1662 unsigned len,
1663 unsigned flags,
1664 struct page **pagep,
1665 void **fsdata)
1666 {
1667 *pagep = NULL;
1668 return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
1669 xfs_get_blocks);
1670 }
1671
1672 STATIC sector_t
1673 xfs_vm_bmap(
1674 struct address_space *mapping,
1675 sector_t block)
1676 {
1677 struct inode *inode = (struct inode *)mapping->host;
1678 struct xfs_inode *ip = XFS_I(inode);
1679
1680 xfs_itrace_entry(XFS_I(inode));
1681 xfs_ilock(ip, XFS_IOLOCK_SHARED);
1682 xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
1683 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
1684 return generic_block_bmap(mapping, block, xfs_get_blocks);
1685 }
1686
1687 STATIC int
1688 xfs_vm_readpage(
1689 struct file *unused,
1690 struct page *page)
1691 {
1692 return mpage_readpage(page, xfs_get_blocks);
1693 }
1694
1695 STATIC int
1696 xfs_vm_readpages(
1697 struct file *unused,
1698 struct address_space *mapping,
1699 struct list_head *pages,
1700 unsigned nr_pages)
1701 {
1702 return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
1703 }
1704
1705 const struct address_space_operations xfs_address_space_operations = {
1706 .readpage = xfs_vm_readpage,
1707 .readpages = xfs_vm_readpages,
1708 .writepage = xfs_vm_writepage,
1709 .writepages = xfs_vm_writepages,
1710 .sync_page = block_sync_page,
1711 .releasepage = xfs_vm_releasepage,
1712 .invalidatepage = xfs_vm_invalidatepage,
1713 .write_begin = xfs_vm_write_begin,
1714 .write_end = generic_write_end,
1715 .bmap = xfs_vm_bmap,
1716 .direct_IO = xfs_vm_direct_IO,
1717 .migratepage = buffer_migrate_page,
1718 .is_partially_uptodate = block_is_partially_uptodate,
1719 .error_remove_page = generic_error_remove_page,
1720 };