/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

STATIC void
xfs_count_page_state(
        struct page *page,
        int *delalloc,
        int *unmapped,
        int *unwritten)
{
        struct buffer_head *bh, *head;

        *delalloc = *unmapped = *unwritten = 0;

        bh = head = page_buffers(page);
        do {
                if (buffer_uptodate(bh) && !buffer_mapped(bh))
                        (*unmapped) = 1;
                else if (buffer_unwritten(bh) && !buffer_delay(bh))
                        clear_buffer_unwritten(bh);
                else if (buffer_unwritten(bh))
                        (*unwritten) = 1;
                else if (buffer_delay(bh))
                        (*delalloc) = 1;
        } while ((bh = bh->b_this_page) != head);
}

#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
        int tag,
        struct inode *inode,
        struct page *page,
        unsigned long pgoff)
{
        xfs_inode_t *ip;
        bhv_vnode_t *vp = vn_from_inode(inode);
        loff_t isize = i_size_read(inode);
        loff_t offset = page_offset(page);
        int delalloc = -1, unmapped = -1, unwritten = -1;

        if (page_has_buffers(page))
                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

        ip = xfs_vtoi(vp);
        if (!ip->i_rwtrace)
                return;

        ktrace_enter(ip->i_rwtrace,
                (void *)((unsigned long)tag),
                (void *)ip,
                (void *)inode,
                (void *)page,
                (void *)pgoff,
                (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
                (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
                (void *)((unsigned long)(isize & 0xffffffff)),
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)delalloc),
                (void *)((unsigned long)unmapped),
                (void *)((unsigned long)unwritten),
                (void *)((unsigned long)current_pid()),
                (void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, pgoff)
#endif

/*
 * Schedule IO completion handling on an xfsdatad if this was
 * the final hold on this ioend.
 */
STATIC void
xfs_finish_ioend(
        xfs_ioend_t *ioend)
{
        if (atomic_dec_and_test(&ioend->io_remaining))
                queue_work(xfsdatad_workqueue, &ioend->io_work);
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
        xfs_ioend_t *ioend)
{
        struct buffer_head *bh, *next;

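        /* Buffers in an ioend are chained through b_private (see xfs_add_to_ioend()). */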
        for (bh = ioend->io_buffer_head; bh; bh = next) {
                next = bh->b_private;
                bh->b_end_io(bh, !ioend->io_error);
        }
        if (unlikely(ioend->io_error))
                vn_ioerror(ioend->io_vnode, ioend->io_error, __FILE__,__LINE__);
        vn_iowake(ioend->io_vnode);
        mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Buffered IO write completion for delayed allocate extents.
 * TODO: Update ondisk isize now that we know the file data
 * has been flushed (i.e. the notorious "NULL file" problem).
 */
STATIC void
xfs_end_bio_delalloc(
        struct work_struct *work)
{
        xfs_ioend_t *ioend =
                container_of(work, xfs_ioend_t, io_work);

        xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
        struct work_struct *work)
{
        xfs_ioend_t *ioend =
                container_of(work, xfs_ioend_t, io_work);

        xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
        struct work_struct *work)
{
        xfs_ioend_t *ioend =
                container_of(work, xfs_ioend_t, io_work);
        bhv_vnode_t *vp = ioend->io_vnode;
        xfs_off_t offset = ioend->io_offset;
        size_t size = ioend->io_size;

        if (likely(!ioend->io_error))
                bhv_vop_bmap(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL);
        xfs_destroy_ioend(ioend);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
        struct inode *inode,
        unsigned int type)
{
        xfs_ioend_t *ioend;

        ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

        /*
         * Set the count to 1 initially, which will prevent an I/O
         * completion callback from happening before we have started
         * all the I/O, and hence from calling the completion routine
         * too early.
         */
        atomic_set(&ioend->io_remaining, 1);
        ioend->io_error = 0;
        ioend->io_list = NULL;
        ioend->io_type = type;
        ioend->io_vnode = vn_from_inode(inode);
        ioend->io_buffer_head = NULL;
        ioend->io_buffer_tail = NULL;
        atomic_inc(&ioend->io_vnode->v_iocount);
        ioend->io_offset = 0;
        ioend->io_size = 0;

        if (type == IOMAP_UNWRITTEN)
                INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
        else if (type == IOMAP_DELAY)
                INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
        else
                INIT_WORK(&ioend->io_work, xfs_end_bio_written);

        return ioend;
}

STATIC int
xfs_map_blocks(
        struct inode *inode,
        loff_t offset,
        ssize_t count,
        xfs_iomap_t *mapp,
        int flags)
{
        bhv_vnode_t *vp = vn_from_inode(inode);
        int error, nmaps = 1;

        error = bhv_vop_bmap(vp, offset, count, flags, mapp, &nmaps);
        if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
                VMODIFY(vp);
        return -error;
}

STATIC inline int
xfs_iomap_valid(
        xfs_iomap_t *iomapp,
        loff_t offset)
{
        return offset >= iomapp->iomap_offset &&
                offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC int
xfs_end_bio(
        struct bio *bio,
        unsigned int bytes_done,
        int error)
{
        xfs_ioend_t *ioend = bio->bi_private;

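        /*
         * On this kernel's bio API the completion handler can be called
         * for partial progress; a non-zero bi_size means more I/O is
         * still outstanding, so wait for the final call.
         */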
        if (bio->bi_size)
                return 1;

        ASSERT(atomic_read(&bio->bi_cnt) >= 1);
        ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

        /* Toss bio and pass work off to an xfsdatad thread */
        bio->bi_private = NULL;
        bio->bi_end_io = NULL;
        bio_put(bio);

        xfs_finish_ioend(ioend);
        return 0;
}

STATIC void
xfs_submit_ioend_bio(
        xfs_ioend_t *ioend,
        struct bio *bio)
{
        atomic_inc(&ioend->io_remaining);

        bio->bi_private = ioend;
        bio->bi_end_io = xfs_end_bio;

        submit_bio(WRITE, bio);
        ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
        bio_put(bio);
}

STATIC struct bio *
xfs_alloc_ioend_bio(
        struct buffer_head *bh)
{
        struct bio *bio;
        int nvecs = bio_get_nr_vecs(bh->b_bdev);

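        /*
         * bio_alloc() may fail for a large vector count under memory
         * pressure; halve the request and retry until it succeeds.
         */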
        do {
                bio = bio_alloc(GFP_NOIO, nvecs);
                nvecs >>= 1;
        } while (!bio);

        ASSERT(bio->bi_private == NULL);
        bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
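        /*
         * Grab an extra reference so the bio is still valid for the
         * checks done after submission; xfs_submit_ioend_bio() drops it.
         */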
        bio_get(bio);
        return bio;
}

STATIC void
xfs_start_buffer_writeback(
        struct buffer_head *bh)
{
        ASSERT(buffer_mapped(bh));
        ASSERT(buffer_locked(bh));
        ASSERT(!buffer_delay(bh));
        ASSERT(!buffer_unwritten(bh));

        mark_buffer_async_write(bh);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
        struct page *page,
        struct writeback_control *wbc,
        int clear_dirty,
        int buffers)
{
        ASSERT(PageLocked(page));
        ASSERT(!PageWriteback(page));
        set_page_writeback(page);
        if (clear_dirty)
                clear_page_dirty(page);
        unlock_page(page);
        if (!buffers) {
                end_page_writeback(page);
                wbc->pages_skipped++;   /* We didn't write this page */
        }
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
        return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we go, we can end up with a page that only has some of its
 * buffers marked async write, and I/O completion on those can occur before
 * we mark the remaining buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
        xfs_ioend_t *ioend)
{
        xfs_ioend_t *head = ioend;
        xfs_ioend_t *next;
        struct buffer_head *bh;
        struct bio *bio;
        sector_t lastblock = 0;

        /* Pass 1 - start writeback */
        do {
                next = ioend->io_list;
                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
                        xfs_start_buffer_writeback(bh);
                }
        } while ((ioend = next) != NULL);

        /* Pass 2 - submit I/O */
        ioend = head;
        do {
                next = ioend->io_list;
                bio = NULL;

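                /*
                 * Build bios out of physically contiguous buffers: start
                 * a new bio whenever the next buffer is discontiguous or
                 * the current bio is full.
                 */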
                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

                        if (!bio) {
 retry:
                                bio = xfs_alloc_ioend_bio(bh);
                        } else if (bh->b_blocknr != lastblock + 1) {
                                xfs_submit_ioend_bio(ioend, bio);
                                goto retry;
                        }

                        if (bio_add_buffer(bio, bh) != bh->b_size) {
                                xfs_submit_ioend_bio(ioend, bio);
                                goto retry;
                        }

                        lastblock = bh->b_blocknr;
                }
                if (bio)
                        xfs_submit_ioend_bio(ioend, bio);
                xfs_finish_ioend(ioend);
        } while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
        xfs_ioend_t *ioend)
{
        xfs_ioend_t *next;
        struct buffer_head *bh, *next_bh;

        do {
                next = ioend->io_list;
                bh = ioend->io_buffer_head;
                do {
                        next_bh = bh->b_private;
                        clear_buffer_async_write(bh);
                        unlock_buffer(bh);
                } while ((bh = next_bh) != NULL);

                vn_iowake(ioend->io_vnode);
                mempool_free(ioend, xfs_ioend_pool);
        } while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
        struct inode *inode,
        struct buffer_head *bh,
        xfs_off_t offset,
        unsigned int type,
        xfs_ioend_t **result,
        int need_ioend)
{
        xfs_ioend_t *ioend = *result;

        if (!ioend || need_ioend || type != ioend->io_type) {
                xfs_ioend_t *previous = *result;

                ioend = xfs_alloc_ioend(inode, type);
                ioend->io_offset = offset;
                ioend->io_buffer_head = bh;
                ioend->io_buffer_tail = bh;
                if (previous)
                        previous->io_list = ioend;
                *result = ioend;
        } else {
                ioend->io_buffer_tail->b_private = bh;
                ioend->io_buffer_tail = bh;
        }

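        /*
         * b_private links the buffers of an ioend into a singly linked
         * list; terminate it at the new tail.
         */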
        bh->b_private = NULL;
        ioend->io_size += bh->b_size;
}

STATIC void
xfs_map_buffer(
        struct buffer_head *bh,
        xfs_iomap_t *mp,
        xfs_off_t offset,
        uint block_bits)
{
        sector_t bn;

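        /*
         * iomap_bn is in 512-byte basic blocks; convert it to filesystem
         * blocks, then add the offset of this buffer within the mapping.
         */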
        ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);

        bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
              ((offset - mp->iomap_offset) >> block_bits);

        ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));

        bh->b_blocknr = bn;
        set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
        struct buffer_head *bh,
        loff_t offset,
        int block_bits,
        xfs_iomap_t *iomapp)
{
        ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
        ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));

        lock_buffer(bh);
        xfs_map_buffer(bh, iomapp, offset, block_bits);
        bh->b_bdev = iomapp->iomap_target->bt_bdev;
        set_buffer_mapped(bh);
        clear_buffer_delay(bh);
        clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
        struct page *page,
        unsigned int pg_offset,
        int mapped)
{
        int ret = 0;

        if (PageWriteback(page))
                return 0;

        if (page->mapping && PageDirty(page)) {
                if (page_has_buffers(page)) {
                        struct buffer_head *bh, *head;

                        bh = head = page_buffers(page);
                        do {
                                if (!buffer_uptodate(bh))
                                        break;
                                if (mapped != buffer_mapped(bh))
                                        break;
                                ret += bh->b_size;
                                if (ret >= pg_offset)
                                        break;
                        } while ((bh = bh->b_this_page) != head);
                } else
                        ret = mapped ? 0 : PAGE_CACHE_SIZE;
        }

        return ret;
}

STATIC size_t
xfs_probe_cluster(
        struct inode *inode,
        struct page *startpage,
        struct buffer_head *bh,
        struct buffer_head *head,
        int mapped)
{
        struct pagevec pvec;
        pgoff_t tindex, tlast, tloff;
        size_t total = 0;
        int done = 0, i;

        /* First sum forwards in this page */
        do {
                if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
                        return total;
                total += bh->b_size;
        } while ((bh = bh->b_this_page) != head);

        /* if we reached the end of the page, sum forwards in following pages */
        tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
        tindex = startpage->index + 1;

        /* Prune this back to avoid pathological behavior */
        tloff = min(tlast, startpage->index + 64);

        pagevec_init(&pvec, 0);
        while (!done && tindex <= tloff) {
                unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

                if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
                        break;

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        size_t pg_offset, len = 0;

                        if (tindex == tlast) {
                                pg_offset =
                                    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
                                if (!pg_offset) {
                                        done = 1;
                                        break;
                                }
                        } else
                                pg_offset = PAGE_CACHE_SIZE;

                        if (page->index == tindex && !TestSetPageLocked(page)) {
                                len = xfs_probe_page(page, pg_offset, mapped);
                                unlock_page(page);
                        }

                        if (!len) {
                                done = 1;
                                break;
                        }

                        total += len;
                        tindex++;
                }

                pagevec_release(&pvec);
                cond_resched();
        }

        return total;
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
        struct page *page,
        unsigned int type)
{
        if (PageWriteback(page))
                return 0;

        if (page->mapping && page_has_buffers(page)) {
                struct buffer_head *bh, *head;
                int acceptable = 0;

                bh = head = page_buffers(page);
                do {
                        if (buffer_unwritten(bh))
                                acceptable = (type == IOMAP_UNWRITTEN);
                        else if (buffer_delay(bh))
                                acceptable = (type == IOMAP_DELAY);
                        else if (buffer_dirty(bh) && buffer_mapped(bh))
                                acceptable = (type == 0);
                        else
                                break;
                } while ((bh = bh->b_this_page) != head);

                if (acceptable)
                        return 1;
        }

        return 0;
}

/*
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
        struct inode *inode,
        struct page *page,
        loff_t tindex,
        xfs_iomap_t *mp,
        xfs_ioend_t **ioendp,
        struct writeback_control *wbc,
        int startio,
        int all_bh)
{
        struct buffer_head *bh, *head;
        xfs_off_t end_offset;
        unsigned long p_offset;
        unsigned int type;
        int bbits = inode->i_blkbits;
        int len, page_dirty;
        int count = 0, done = 0, uptodate = 1;
        xfs_off_t offset = page_offset(page);

        if (page->index != tindex)
                goto fail;
        if (TestSetPageLocked(page))
                goto fail;
        if (PageWriteback(page))
                goto fail_unlock_page;
        if (page->mapping != inode->i_mapping)
                goto fail_unlock_page;
        if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
                goto fail_unlock_page;

        /*
         * page_dirty is initially a count of buffers on the page before
         * EOF and is decremented as we move each into a cleanable state.
         *
         * Derivation:
         *
         * End offset is the highest offset that this page should represent.
         * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
         * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
         * hence give us the correct page_dirty count. On any other page,
         * it will be zero and in that case we need page_dirty to be the
         * count of buffers on the page.
         */
        end_offset = min_t(unsigned long long,
                        (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
                        i_size_read(inode));

        len = 1 << inode->i_blkbits;
        p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
                        PAGE_CACHE_SIZE);
        p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
        page_dirty = p_offset / len;

        bh = head = page_buffers(page);
        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;
                if (!(PageUptodate(page) || buffer_uptodate(bh))) {
                        done = 1;
                        continue;
                }

                if (buffer_unwritten(bh) || buffer_delay(bh)) {
                        if (buffer_unwritten(bh))
                                type = IOMAP_UNWRITTEN;
                        else
                                type = IOMAP_DELAY;

                        if (!xfs_iomap_valid(mp, offset)) {
                                done = 1;
                                continue;
                        }

                        ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
                        ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

                        xfs_map_at_offset(bh, offset, bbits, mp);
                        if (startio) {
                                xfs_add_to_ioend(inode, bh, offset,
                                                type, ioendp, done);
                        } else {
                                set_buffer_dirty(bh);
                                unlock_buffer(bh);
                                mark_buffer_dirty(bh);
                        }
                        page_dirty--;
                        count++;
                } else {
                        type = 0;
                        if (buffer_mapped(bh) && all_bh && startio) {
                                lock_buffer(bh);
                                xfs_add_to_ioend(inode, bh, offset,
                                                type, ioendp, done);
                                count++;
                                page_dirty--;
                        } else {
                                done = 1;
                        }
                }
        } while (offset += len, (bh = bh->b_this_page) != head);

        if (uptodate && bh == head)
                SetPageUptodate(page);

        if (startio) {
                if (count) {
                        struct backing_dev_info *bdi;

                        bdi = inode->i_mapping->backing_dev_info;
                        wbc->nr_to_write--;
                        if (bdi_write_congested(bdi)) {
                                wbc->encountered_congestion = 1;
                                done = 1;
                        } else if (wbc->nr_to_write <= 0) {
                                done = 1;
                        }
                }
                xfs_start_page_writeback(page, wbc, !page_dirty, count);
        }

        return done;
 fail_unlock_page:
        unlock_page(page);
 fail:
        return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
        struct inode *inode,
        pgoff_t tindex,
        xfs_iomap_t *iomapp,
        xfs_ioend_t **ioendp,
        struct writeback_control *wbc,
        int startio,
        int all_bh,
        pgoff_t tlast)
{
        struct pagevec pvec;
        int done = 0, i;

        pagevec_init(&pvec, 0);
        while (!done && tindex <= tlast) {
                unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

                if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
                        break;

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        done = xfs_convert_page(inode, pvec.pages[i], tindex++,
                                        iomapp, ioendp, wbc, startio, all_bh);
                        if (done)
                                break;
                }

                pagevec_release(&pvec);
                cond_resched();
        }
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks or which block for
 * that matter are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mmap write but only partially set up by block_prepare_write, the
 * bh->b_state's will not agree and only the ones set up by
 * block_prepare_write/block_commit_write will have valid state; thus
 * the whole page must be written out.
 */

STATIC int
xfs_page_state_convert(
        struct inode *inode,
        struct page *page,
        struct writeback_control *wbc,
        int startio,
        int unmapped) /* also implies page uptodate */
{
        struct buffer_head *bh, *head;
        xfs_iomap_t iomap;
        xfs_ioend_t *ioend = NULL, *iohead = NULL;
        loff_t offset;
        unsigned long p_offset = 0;
        unsigned int type;
        __uint64_t end_offset;
        pgoff_t end_index, last_index, tlast;
        ssize_t size, len;
        int flags, err, iomap_valid = 0, uptodate = 1;
        int page_dirty, count = 0;
        int trylock = 0;
        int all_bh = unmapped;

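        /*
         * For non-blocking writeback, take the allocation locks with
         * trylock semantics (BMAPI_TRYLOCK) rather than sleeping.
         */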
        if (startio) {
                if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
                        trylock |= BMAPI_TRYLOCK;
        }

        /* Is this page beyond the end of the file? */
        offset = i_size_read(inode);
        end_index = offset >> PAGE_CACHE_SHIFT;
        last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
        if (page->index >= end_index) {
                if ((page->index >= end_index + 1) ||
                    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
                        if (startio)
                                unlock_page(page);
                        return 0;
                }
        }

        /*
         * page_dirty is initially a count of buffers on the page before
         * EOF and is decremented as we move each into a cleanable state.
         *
         * Derivation:
         *
         * End offset is the highest offset that this page should represent.
         * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
         * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
         * hence give us the correct page_dirty count. On any other page,
         * it will be zero and in that case we need page_dirty to be the
         * count of buffers on the page.
         */
        end_offset = min_t(unsigned long long,
                        (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
        len = 1 << inode->i_blkbits;
        p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
                        PAGE_CACHE_SIZE);
        p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
        page_dirty = p_offset / len;

        bh = head = page_buffers(page);
        offset = page_offset(page);
        flags = -1;
        type = 0;

        /* TODO: cleanup count and page_dirty */

        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;
                if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
                        /*
                         * the iomap is actually still valid, but the ioend
                         * isn't.  shouldn't happen too often.
                         */
                        iomap_valid = 0;
                        continue;
                }

                if (iomap_valid)
                        iomap_valid = xfs_iomap_valid(&iomap, offset);

                /*
                 * First case, map an unwritten extent and prepare for
                 * extent state conversion transaction on completion.
                 *
                 * Second case, allocate space for a delalloc buffer.
                 * We can return EAGAIN here in the release page case.
                 *
                 * Third case, an unmapped buffer was found, and we are
                 * in a path where we need to write the whole page out.
                 */
                if (buffer_unwritten(bh) || buffer_delay(bh) ||
                    ((buffer_uptodate(bh) || PageUptodate(page)) &&
                     !buffer_mapped(bh) && (unmapped || startio))) {
                        /*
                         * Make sure we don't use a read-only iomap
                         */
                        if (flags == BMAPI_READ)
                                iomap_valid = 0;

                        if (buffer_unwritten(bh)) {
                                type = IOMAP_UNWRITTEN;
                                flags = BMAPI_WRITE | BMAPI_IGNSTATE;
                        } else if (buffer_delay(bh)) {
                                type = IOMAP_DELAY;
                                flags = BMAPI_ALLOCATE | trylock;
                        } else {
                                type = IOMAP_NEW;
                                flags = BMAPI_WRITE | BMAPI_MMAP;
                        }

                        if (!iomap_valid) {
                                if (type == IOMAP_NEW) {
                                        size = xfs_probe_cluster(inode,
                                                        page, bh, head, 0);
                                } else {
                                        size = len;
                                }

                                err = xfs_map_blocks(inode, offset, size,
                                                &iomap, flags);
                                if (err)
                                        goto error;
                                iomap_valid = xfs_iomap_valid(&iomap, offset);
                        }
                        if (iomap_valid) {
                                xfs_map_at_offset(bh, offset,
                                                inode->i_blkbits, &iomap);
                                if (startio) {
                                        xfs_add_to_ioend(inode, bh, offset,
                                                        type, &ioend,
                                                        !iomap_valid);
                                } else {
                                        set_buffer_dirty(bh);
                                        unlock_buffer(bh);
                                        mark_buffer_dirty(bh);
                                }
                                page_dirty--;
                                count++;
                        }
                } else if (buffer_uptodate(bh) && startio) {
                        /*
                         * we got here because the buffer is already mapped.
                         * That means it must already have extents allocated
                         * underneath it. Map the extent by reading it.
                         */
                        if (!iomap_valid || type != 0) {
                                flags = BMAPI_READ;
                                size = xfs_probe_cluster(inode, page, bh,
                                                                head, 1);
                                err = xfs_map_blocks(inode, offset, size,
                                                &iomap, flags);
                                if (err)
                                        goto error;
                                iomap_valid = xfs_iomap_valid(&iomap, offset);
                        }

                        type = 0;
                        if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
                                ASSERT(buffer_mapped(bh));
                                if (iomap_valid)
                                        all_bh = 1;
                                xfs_add_to_ioend(inode, bh, offset, type,
                                                &ioend, !iomap_valid);
                                page_dirty--;
                                count++;
                        } else {
                                iomap_valid = 0;
                        }
                } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
                           (unmapped || startio)) {
                        iomap_valid = 0;
                }

                if (!iohead)
                        iohead = ioend;

        } while (offset += len, ((bh = bh->b_this_page) != head));

        if (uptodate && bh == head)
                SetPageUptodate(page);

        if (startio)
                xfs_start_page_writeback(page, wbc, 1, count);

        if (ioend && iomap_valid) {
                offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
                                        PAGE_CACHE_SHIFT;
                tlast = min_t(pgoff_t, offset, last_index);
                xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
                                        wbc, startio, all_bh, tlast);
        }

        if (iohead)
                xfs_submit_ioend(iohead);

        return page_dirty;

error:
        if (iohead)
                xfs_cancel_ioend(iohead);

        /*
         * If it's delalloc and we have nowhere to put it,
         * throw it away, unless the lower layers told
         * us to try again.
         */
        if (err != -EAGAIN) {
                if (!unmapped)
                        block_invalidatepage(page, 0);
                ClearPageUptodate(page);
        }
        return err;
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first: if we
 * are already in a transaction, or disk I/O during allocations
 * is disabled, we need to fail the writepage and redirty the page.
 */

STATIC int
xfs_vm_writepage(
        struct page *page,
        struct writeback_control *wbc)
{
        int error;
        int need_trans;
        int delalloc, unmapped, unwritten;
        struct inode *inode = page->mapping->host;

        xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

        /*
         * We need a transaction if:
         *  1. There are delalloc buffers on the page
         *  2. The page is uptodate and we have unmapped buffers
         *  3. The page is uptodate and we have no buffers
         *  4. There are unwritten buffers on the page
         */

        if (!page_has_buffers(page)) {
                unmapped = 1;
                need_trans = 1;
        } else {
                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
                if (!PageUptodate(page))
                        unmapped = 0;
                need_trans = delalloc + unmapped + unwritten;
        }

        /*
         * If we need a transaction and the process flags say
         * we are already in a transaction, or no IO is allowed
         * then mark the page dirty again and leave the page
         * as is.
         */
        if (current_test_flags(PF_FSTRANS) && need_trans)
                goto out_fail;

        /*
         * Delay hooking up buffer heads until we have
         * made our go/no-go decision.
         */
        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << inode->i_blkbits, 0);

        /*
         * Convert delayed allocate, unwritten or unmapped space
         * to real space and flush out to disk.
         */
        error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
        if (error == -EAGAIN)
                goto out_fail;
        if (unlikely(error < 0))
                goto out_unlock;

        return 0;

out_fail:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
out_unlock:
        unlock_page(page);
        return error;
}

STATIC int
xfs_vm_writepages(
        struct address_space *mapping,
        struct writeback_control *wbc)
{
        struct bhv_vnode *vp = vn_from_inode(mapping->host);

        if (VN_TRUNC(vp))
                VUNTRUNCATE(vp);
        return generic_writepages(mapping, wbc);
}

1173 * Called to move a page into cleanable state - and from there
1174 * to be released. Possibly the page is already clean. We always
1175 * have buffer heads in this call.
1176 *
1177 * Returns 0 if the page is ok to release, 1 otherwise.
1178 *
1179 * Possible scenarios are:
1180 *
1181 * 1. We are being called to release a page which has been written
1182 * to via regular I/O. buffer heads will be dirty and possibly
1183 * delalloc. If no delalloc buffer heads in this case then we
1184 * can just return zero.
1185 *
1186 * 2. We are called to release a page which has been written via
1187 * mmap, all we need to do is ensure there is no delalloc
1188 * state in the buffer heads, if not we can let the caller
1189 * free them and we should come back later via writepage.
1190 */
STATIC int
xfs_vm_releasepage(
        struct page *page,
        gfp_t gfp_mask)
{
        struct inode *inode = page->mapping->host;
        int dirty, delalloc, unmapped, unwritten;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 1,
        };

        xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);

        if (!page_has_buffers(page))
                return 0;

        xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
        if (!delalloc && !unwritten)
                goto free_buffers;

        if (!(gfp_mask & __GFP_FS))
                return 0;

        /* If we are already inside a transaction or the thread cannot
         * do I/O, we cannot release this page.
         */
        if (current_test_flags(PF_FSTRANS))
                return 0;

        /*
         * Convert delalloc space to real space, do not flush the
         * data out to disk, that will be done by the caller.
         * Never need to allocate space here - we will always
         * come back to writepage in that case.
         */
        dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
        if (dirty == 0 && !unwritten)
                goto free_buffers;
        return 0;

free_buffers:
        return try_to_free_buffers(page);
}

STATIC int
__xfs_get_blocks(
        struct inode *inode,
        sector_t iblock,
        struct buffer_head *bh_result,
        int create,
        int direct,
        bmapi_flags_t flags)
{
        bhv_vnode_t *vp = vn_from_inode(inode);
        xfs_iomap_t iomap;
        xfs_off_t offset;
        ssize_t size;
        int niomap = 1;
        int error;

        offset = (xfs_off_t)iblock << inode->i_blkbits;
        ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
        size = bh_result->b_size;
        error = bhv_vop_bmap(vp, offset, size,
                        create ? flags : BMAPI_READ, &iomap, &niomap);
        if (error)
                return -error;
        if (niomap == 0)
                return 0;

        if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
                /*
                 * For unwritten extents do not report a disk address on
                 * the read case (treat as if we're reading into a hole).
                 */
                if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
                        xfs_map_buffer(bh_result, &iomap, offset,
                                       inode->i_blkbits);
                }
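                /*
                 * For direct I/O into an unwritten extent, stash the inode
                 * in b_private so the completion handler knows a conversion
                 * transaction is needed (see xfs_end_io_direct()).
                 */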
                if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
                        if (direct)
                                bh_result->b_private = inode;
                        set_buffer_unwritten(bh_result);
                        set_buffer_delay(bh_result);
                }
        }

1280 * If this is a realtime file, data may be on a different device.
1281 * to that pointed to from the buffer_head b_bdev currently.
1282 */
1283 bh_result->b_bdev = iomap.iomap_target->bt_bdev;
1284
1285 /*
1286 * If we previously allocated a block out beyond eof and we are
1287 * now coming back to use it then we will need to flag it as new
1288 * even if it has a disk address.
1289 */
1290 if (create &&
1291 ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
1292 (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW)))
1293 set_buffer_new(bh_result);
1294
1295 if (iomap.iomap_flags & IOMAP_DELAY) {
1296 BUG_ON(direct);
1297 if (create) {
1298 set_buffer_uptodate(bh_result);
1299 set_buffer_mapped(bh_result);
1300 set_buffer_delay(bh_result);
1301 }
1302 }
1303
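        /*
         * Report how much of the mapping is usable beyond this block so
         * that direct I/O and multi-block callers can map it in one go.
         */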
        if (direct || size > (1 << inode->i_blkbits)) {
                ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
                offset = min_t(xfs_off_t,
                                iomap.iomap_bsize - iomap.iomap_delta, size);
                bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
        }

        return 0;
}

int
xfs_get_blocks(
        struct inode *inode,
        sector_t iblock,
        struct buffer_head *bh_result,
        int create)
{
        return __xfs_get_blocks(inode, iblock,
                                bh_result, create, 0, BMAPI_WRITE);
}

STATIC int
xfs_get_blocks_direct(
        struct inode *inode,
        sector_t iblock,
        struct buffer_head *bh_result,
        int create)
{
        return __xfs_get_blocks(inode, iblock,
                                bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

STATIC void
xfs_end_io_direct(
        struct kiocb *iocb,
        loff_t offset,
        ssize_t size,
        void *private)
{
        xfs_ioend_t *ioend = iocb->private;

        /*
         * Non-NULL private data means we need to issue a transaction to
         * convert a range from unwritten to written extents.  This needs
         * to happen from process context but aio+dio I/O completion
         * happens from irq context so we need to defer it to a workqueue.
         * This is not necessary for synchronous direct I/O, but we do
         * it anyway to keep the code uniform and simpler.
         *
         * The core direct I/O code might be changed to always call the
         * completion handler in the future, in which case all this can
         * go away.
         */
        if (private && size > 0) {
                ioend->io_offset = offset;
                ioend->io_size = size;
                xfs_finish_ioend(ioend);
        } else {
                xfs_destroy_ioend(ioend);
        }

        /*
         * blockdev_direct_IO can return an error even after the I/O
         * completion handler was called.  Thus we need to protect
         * against double-freeing.
         */
        iocb->private = NULL;
}

STATIC ssize_t
xfs_vm_direct_IO(
        int rw,
        struct kiocb *iocb,
        const struct iovec *iov,
        loff_t offset,
        unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        bhv_vnode_t *vp = vn_from_inode(inode);
        xfs_iomap_t iomap;
        int maps = 1;
        int error;
        ssize_t ret;

        error = bhv_vop_bmap(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps);
        if (error)
                return -error;

        iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);

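        /*
         * XFS does its own inode locking for direct I/O, so use the
         * blockdev_direct_IO variants that skip the generic i_mutex
         * handling: own-locking for writes, no-locking for reads.
         */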
        if (rw == WRITE) {
                ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
                        iomap.iomap_target->bt_bdev,
                        iov, offset, nr_segs,
                        xfs_get_blocks_direct,
                        xfs_end_io_direct);
        } else {
                ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
                        iomap.iomap_target->bt_bdev,
                        iov, offset, nr_segs,
                        xfs_get_blocks_direct,
                        xfs_end_io_direct);
        }

        if (unlikely(ret <= 0 && iocb->private))
                xfs_destroy_ioend(iocb->private);
        return ret;
}

STATIC int
xfs_vm_prepare_write(
        struct file *file,
        struct page *page,
        unsigned int from,
        unsigned int to)
{
        return block_prepare_write(page, from, to, xfs_get_blocks);
}

STATIC sector_t
xfs_vm_bmap(
        struct address_space *mapping,
        sector_t block)
{
        struct inode *inode = (struct inode *)mapping->host;
        bhv_vnode_t *vp = vn_from_inode(inode);

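        /*
         * Flush delalloc and unwritten state first so the block mapping
         * we report (e.g. via FIBMAP) refers to real disk blocks.
         */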
        vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
        bhv_vop_rwlock(vp, VRWLOCK_READ);
        bhv_vop_flush_pages(vp, (xfs_off_t)0, -1, 0, FI_REMAPF);
        bhv_vop_rwunlock(vp, VRWLOCK_READ);
        return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
        struct file *unused,
        struct page *page)
{
        return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
        struct file *unused,
        struct address_space *mapping,
        struct list_head *pages,
        unsigned nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

STATIC void
xfs_vm_invalidatepage(
        struct page *page,
        unsigned long offset)
{
        xfs_page_trace(XFS_INVALIDPAGE_ENTER,
                        page->mapping->host, page, offset);
        block_invalidatepage(page, offset);
}

const struct address_space_operations xfs_address_space_operations = {
        .readpage               = xfs_vm_readpage,
        .readpages              = xfs_vm_readpages,
        .writepage              = xfs_vm_writepage,
        .writepages             = xfs_vm_writepages,
        .sync_page              = block_sync_page,
        .releasepage            = xfs_vm_releasepage,
        .invalidatepage         = xfs_vm_invalidatepage,
        .prepare_write          = xfs_vm_prepare_write,
        .commit_write           = generic_commit_write,
        .bmap                   = xfs_vm_bmap,
        .direct_IO              = xfs_vm_direct_IO,
        .migratepage            = buffer_migrate_page,
};