#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>	/* generic_writepages */
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/osd_client.h>
/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty.  (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and are writing the most recently dirtied
 * pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */
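
/*
 * Illustrative walk-through of the accounting above (an editorial
 * sketch, not part of the original source): suppose a file has 3
 * dirty pages and no snaps, so i_wrbuffer_ref == i_wrbuffer_ref_head
 * == 3.  A snapshot arrives: the 3 moves into capsnap->dirty and
 * i_wrbuffer_ref_head resets to 0.  Two more pages are then dirtied
 * against the new head context, giving i_wrbuffer_ref == 5,
 * capsnap->dirty == 3, i_wrbuffer_ref_head == 2.  Writeback must
 * retire the 3 capsnap pages (in their snap context) before any of
 * the 2 head pages.
 */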
#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
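
/*
 * Worked example (editorial, illustrative values): with PAGE_SHIFT ==
 * 12 (4 KB pages) and, say, congestion_kb == 8192:
 *
 *   CONGESTION_ON_THRESH(8192)  = 8192 >> 2       = 2048 dirty pages (8 MB)
 *   CONGESTION_OFF_THRESH(8192) = 2048 - 2048/4   = 1536 dirty pages (6 MB)
 *
 * i.e. the bdi is flagged congested once ~8 MB of ceph writeback is
 * in flight, and cleared once we drop 25% below that mark, giving the
 * thresholds some hysteresis.
 */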
static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}
/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct ceph_inode_info *ci;
	int undo = 0;
	struct ceph_snap_context *snapc;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (TestSetPageDirty(page)) {
		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
		     mapping->host, page, page->index);
		return 0;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/*
	 * Note that we're grabbing a snapc ref here without holding
	 * any locks!
	 */
	snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_head_snapc == NULL)
		ci->i_head_snapc = ceph_get_snap_context(snapc);
	++ci->i_wrbuffer_ref_head;
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, page, page->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/* now adjust page */
	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(!PageUptodate(page));
		account_page_dirtied(page, page->mapping);
		radix_tree_tag_set(&mapping->page_tree,
				   page_index(page), PAGECACHE_TAG_DIRTY);

		/*
		 * Reference snap context in page->private.  Also set
		 * PagePrivate so that we get invalidatepage callback.
		 */
		page->private = (unsigned long)snapc;
		SetPagePrivate(page);
	} else {
		dout("ANON set_page_dirty %p (raced truncate?)\n", page);
		undo = 1;
	}

	spin_unlock_irq(&mapping->tree_lock);

	if (undo)
		/* whoops, we failed to dirty the page */
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);

	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	BUG_ON(!PageDirty(page));
	return 1;
}
/*
 * If we are truncating the full page (i.e. offset == 0), adjust the
 * dirty page counters appropriately.  Only called if there is private
 * data on the page.
 */
static void ceph_invalidatepage(struct page *page, unsigned long offset)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc = page_snap_context(page);

	BUG_ON(!PageLocked(page));
	BUG_ON(!PagePrivate(page));
	BUG_ON(!page->mapping);

	inode = page->mapping->host;

	/*
	 * We can get non-dirty pages here due to races between
	 * set_page_dirty and truncate_complete_page; just spit out a
	 * warning, in case we end up with accounting problems later.
	 */
	if (!PageDirty(page))
		pr_err("%p invalidatepage %p page not dirty\n", inode, page);

	if (offset == 0)
		ClearPageChecked(page);

	ci = ceph_inode(inode);
	if (offset == 0) {
		dout("%p invalidatepage %p idx %lu full dirty page %lu\n",
		     inode, page, page->index, offset);
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
		ceph_put_snap_context(snapc);
		page->private = 0;
		ClearPagePrivate(page);
	} else {
		dout("%p invalidatepage %p idx %lu partial dirty page\n",
		     inode, page, page->index);
	}
}
/* just a sanity check */
static int ceph_releasepage(struct page *page, gfp_t g)
{
	struct inode *inode = page->mapping ? page->mapping->host : NULL;
	dout("%p releasepage %p idx %lu\n", inode, page, page->index);
	WARN_ON(PageDirty(page));
	WARN_ON(PagePrivate(page));
	return 0;
}
/*
 * read a single page, without unlocking it.
 */
static int readpage_nounlock(struct file *filp, struct page *page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	int err = 0;
	u64 len = PAGE_CACHE_SIZE;

	dout("readpage inode %p file %p page %p index %lu\n",
	     inode, filp, page, page->index);
	err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
				  (u64) page_offset(page), &len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  &page, 1, 0);
	if (err == -ENOENT)
		err = 0;
	if (err < 0) {
		SetPageError(page);
		goto out;
	} else if (err < PAGE_CACHE_SIZE) {
		/* zero fill remainder of page */
		zero_user_segment(page, err, PAGE_CACHE_SIZE);
	}
	SetPageUptodate(page);

out:
	return err < 0 ? err : 0;
}
static int ceph_readpage(struct file *filp, struct page *page)
{
	int r = readpage_nounlock(filp, page);
	unlock_page(page);
	return r;
}
/*
 * Finish an async read(ahead) op.
 */
static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
{
	struct inode *inode = req->r_inode;
	int rc = req->r_result;
	int bytes = le32_to_cpu(msg->hdr.data_len);
	int i;

	dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);

	/* unlock all pages, zeroing any data we didn't read */
	for (i = 0; i < req->r_num_pages; i++, bytes -= PAGE_CACHE_SIZE) {
		struct page *page = req->r_pages[i];

		if (bytes < (int)PAGE_CACHE_SIZE) {
			/* zero (remainder of) page */
			int s = bytes < 0 ? 0 : bytes;
			zero_user_segment(page, s, PAGE_CACHE_SIZE);
		}
		dout("finish_read %p uptodate %p idx %lu\n", inode, page,
		     page->index);
		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);
		page_cache_release(page);
	}
	kfree(req->r_pages);
}
static void ceph_unlock_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		unlock_page(pages[i]);
}
/*
 * start an async read(ahead) operation.  return nr_pages we submitted
 * a read for on success, or negative error code.
 */
static int start_read(struct inode *inode, struct list_head *page_list, int max)
{
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *page = list_entry(page_list->prev, struct page, lru);
	struct ceph_osd_request *req;
	u64 off;
	u64 len;
	int i;
	struct page **pages;
	pgoff_t next_index;
	int nr_pages = 0;
	int ret;

	off = (u64) page_offset(page);

	/* count pages */
	next_index = page->index;
	list_for_each_entry_reverse(page, page_list, lru) {
		if (page->index != next_index)
			break;
		nr_pages++;
		next_index++;
		if (max && nr_pages == max)
			break;
	}
	len = nr_pages << PAGE_CACHE_SHIFT;
	dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
	     off, len);

	req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode),
				    off, &len,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, 0,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    NULL, false, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* build page vector */
	nr_pages = len >> PAGE_CACHE_SHIFT;
	pages = kmalloc(sizeof(*pages) * nr_pages, GFP_NOFS);
	ret = -ENOMEM;
	if (!pages)
		goto out;
	for (i = 0; i < nr_pages; ++i) {
		page = list_entry(page_list->prev, struct page, lru);
		BUG_ON(PageLocked(page));
		list_del(&page->lru);

		dout("start_read %p adding %p idx %lu\n", inode, page,
		     page->index);
		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
					  GFP_NOFS)) {
			page_cache_release(page);
			dout("start_read %p add_to_page_cache failed %p\n",
			     inode, page);
			nr_pages = i;
			goto out_pages;
		}
		pages[i] = page;
	}
	req->r_pages = pages;
	req->r_num_pages = nr_pages;
	req->r_callback = finish_read;
	req->r_inode = inode;

	dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len);
	ret = ceph_osdc_start_request(osdc, req, false);
	if (ret < 0)
		goto out_pages;
	ceph_osdc_put_request(req);
	return nr_pages;

out_pages:
	ceph_unlock_page_vector(pages, nr_pages);
	ceph_release_page_vector(pages, nr_pages);
out:
	ceph_osdc_put_request(req);
	return ret;
}
/*
 * Read multiple pages.  Leave pages we don't read + unlock in page_list;
 * the caller (VM) cleans them up.
 */
static int ceph_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned nr_pages)
{
	struct inode *inode = file_inode(file);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	int rc = 0;
	int max = 0;

	if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE)
		max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1)
			>> PAGE_SHIFT;

	dout("readpages %p file %p nr_pages %d max %d\n", inode, file, nr_pages,
	     max);
	while (!list_empty(page_list)) {
		rc = start_read(inode, page_list, max);
		if (rc < 0)
			goto out;
		BUG_ON(rc == 0);
	}
out:
	dout("readpages %p file %p ret %d\n", inode, file, rc);
	return rc;
}
/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *get_oldest_context(struct inode *inode,
						    u64 *snap_size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (capsnap->dirty_pages) {
			snapc = ceph_get_snap_context(capsnap->context);
			if (snap_size)
				*snap_size = capsnap->size;
			break;
		}
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}
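
/*
 * Illustrative example (editorial, not from the original source): if
 * i_cap_snaps holds capsnaps whose snap contexts have seqs 4 and 7
 * (oldest first) and the head snapc has seq 9, a capsnap with
 * dirty_pages at seq 4 wins; only when no capsnap has dirty pages do
 * we fall through to the seq 9 head context.  writepage_nounlock()
 * below compares each page's snapc seq against this oldest context
 * and refuses to write newer pages out of snap order.
 */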
/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, set the page error bit, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_fs_client *fsc;
	struct ceph_osd_client *osdc;
	loff_t page_off = page_offset(page);
	int len = PAGE_CACHE_SIZE;
	loff_t i_size;
	int err = 0;
	struct ceph_snap_context *snapc, *oldest;
	u64 snap_size = 0;
	long writeback_stat;

	dout("writepage %p idx %lu\n", page, page->index);

	if (!page->mapping || !page->mapping->host) {
		dout("writepage %p - no mapping\n", page);
		return -EFAULT;
	}
	inode = page->mapping->host;
	ci = ceph_inode(inode);
	fsc = ceph_inode_to_client(inode);
	osdc = &fsc->client->osdc;

	/* verify this is a writeable snap context */
	snapc = page_snap_context(page);
	if (snapc == NULL) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		goto out;
	}
	oldest = get_oldest_context(inode, &snap_size);
	if (snapc->seq > oldest->seq) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON((current->flags & PF_MEMALLOC) == 0);
		ceph_put_snap_context(oldest);
		goto out;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
	if (snap_size)
		i_size = snap_size;
	else
		i_size = i_size_read(inode);
	if (i_size < page_off + len)
		len = i_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%u snapc %p\n",
	     inode, page, page->index, page_off, len, snapc);

	writeback_stat = atomic_long_inc_return(&fsc->writeback_count);
	if (writeback_stat >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);

	set_page_writeback(page);
	err = ceph_osdc_writepages(osdc, ceph_vino(inode),
				   &ci->i_layout, snapc,
				   page_off, len,
				   ci->i_truncate_seq, ci->i_truncate_size,
				   &inode->i_mtime, &page, 1);
	if (err < 0) {
		dout("writepage setting page/mapping error %d %p\n", err, page);
		SetPageError(page);
		mapping_set_error(&inode->i_data, err);
		if (wbc)
			wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;  /* vfs expects us to return 0 */
	}
	page->private = 0;
	ClearPagePrivate(page);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);	/* page's reference */
out:
	return err;
}
static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;
	BUG_ON(!inode);
	ihold(inode);
	err = writepage_nounlock(page, wbc);
	unlock_page(page);
	iput(inode);
	return err;
}
/*
 * lame release_pages helper.  release_pages() isn't exported to
 * modules.
 */
static void ceph_release_pages(struct page **pages, int num)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec, 0);
	for (i = 0; i < num; i++) {
		if (pagevec_add(&pvec, pages[i]) == 0)
			pagevec_release(&pvec);
	}
	pagevec_release(&pvec);
}
/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req,
			      struct ceph_msg *msg)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned wrote;
	struct page *page;
	int i;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	int rc = req->r_result;
	u64 bytes = le64_to_cpu(req->r_request_ops[0].extent.length);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	long writeback_stat;
	unsigned issued = ceph_caps_issued(ci);

	if (rc >= 0) {
		/*
		 * Assume we wrote the pages we originally sent.  The
		 * osd might reply with fewer pages if our writeback
		 * raced with a truncation and was adjusted at the osd,
		 * so don't believe the reply.
		 */
		wrote = req->r_num_pages;
	} else {
		wrote = 0;
		mapping_set_error(mapping, rc);
	}
	dout("writepages_finish %p rc %d bytes %llu wrote %d (pages)\n",
	     inode, rc, bytes, wrote);

	/* clean all pages */
	for (i = 0; i < req->r_num_pages; i++) {
		page = req->r_pages[i];
		BUG_ON(!page);
		WARN_ON(!PageUptodate(page));

		writeback_stat =
			atomic_long_dec_return(&fsc->writeback_count);
		if (writeback_stat <
		    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
			clear_bdi_congested(&fsc->backing_dev_info,
					    BLK_RW_ASYNC);

		ceph_put_snap_context(page_snap_context(page));
		page->private = 0;
		ClearPagePrivate(page);
		dout("unlocking %d %p\n", i, page);
		end_page_writeback(page);

		/*
		 * We lost the cache cap, need to truncate the page before
		 * it is unlocked, otherwise we'd truncate it later in the
		 * page truncation thread, possibly losing some data that
		 * raced its way in
		 */
		if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0)
			generic_error_remove_page(inode->i_mapping, page);

		unlock_page(page);
	}
	dout("%p wrote+cleaned %d pages\n", inode, wrote);
	ceph_put_wrbuffer_cap_refs(ci, req->r_num_pages, snapc);

	ceph_release_pages(req->r_pages, req->r_num_pages);
	if (req->r_pages_from_pool)
		mempool_free(req->r_pages,
			     ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool);
	else
		kfree(req->r_pages);
	ceph_osdc_put_request(req);
}
/*
 * allocate a page vec, either directly, or if necessary, via the
 * mempool.  we avoid the mempool if we can because req->r_num_pages
 * may be less than the maximum write size.
 */
static void alloc_page_vec(struct ceph_fs_client *fsc,
			   struct ceph_osd_request *req)
{
	req->r_pages = kmalloc(sizeof(struct page *) * req->r_num_pages,
			       GFP_NOFS);
	if (!req->r_pages) {
		req->r_pages = mempool_alloc(fsc->wb_pagevec_pool, GFP_NOFS);
		req->r_pages_from_pool = 1;
		WARN_ON(!req->r_pages);
	}
}
/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc;
	pgoff_t index, start, end;
	int range_whole = 0;
	int should_loop = 1;
	pgoff_t max_pages = 0, max_pages_ever = 0;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct pagevec pvec;
	int done = 0;
	int rc = 0;
	unsigned wsize = 1 << inode->i_blkbits;
	struct ceph_osd_request *req = NULL;
	int do_sync;
	u64 snap_size = 0;

	/*
	 * Include a 'sync' in the OSD request if this is a data
	 * integrity write (e.g., O_SYNC write or fsync()), or if our
	 * cap is being revoked.
	 */
	do_sync = wbc->sync_mode == WB_SYNC_ALL;
	if (ceph_caps_revoking(ci, CEPH_CAP_FILE_BUFFER))
		do_sync = 1;
	dout("writepages_start %p dosync=%d (mode=%s)\n",
	     inode, do_sync,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	fsc = ceph_inode_to_client(inode);
	if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
		pr_warning("writepage_start %p on forced umount\n", inode);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;
	if (wsize < PAGE_CACHE_SIZE)
		wsize = PAGE_CACHE_SIZE;
	max_pages_ever = wsize >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);

	/* where to start/end? */
	if (wbc->range_cyclic) {
		start = mapping->writeback_index; /* Start from prev offset */
		end = -1;
		dout(" cyclic, start at %lu\n", start);
	} else {
		start = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		should_loop = 0;
		dout(" not cyclic, %lu to %lu\n", start, end);
	}
	index = start;

retry:
	/* find oldest snap context with dirty data */
	ceph_put_snap_context(snapc);
	snap_size = 0;
	snapc = get_oldest_context(inode, &snap_size);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);
	if (last_snapc && snapc != last_snapc) {
		/* if we switched to a newer snapc, restart our scan at the
		 * start of the original file range. */
		dout("  snapc differs from last pass, restarting at %lu\n",
		     index);
		index = start;
	}
	last_snapc = snapc;

	while (!done && index <= end) {
		unsigned i;
		int first;
		pgoff_t next;
		int pvec_pages, locked_pages;
		struct page *page;
		int want;
		u64 offset, len;
		long writeback_stat;

		next = 0;
		locked_pages = 0;
		max_pages = max_pages_ever;

get_more_pages:
		first = -1;
		want = min(end - index,
			   min((pgoff_t)PAGEVEC_SIZE,
			       max_pages - (pgoff_t)locked_pages) - 1)
			+ 1;
		pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index,
						PAGECACHE_TAG_DIRTY,
						want);
		dout("pagevec_lookup_tag got %d\n", pvec_pages);
		if (!pvec_pages && !locked_pages)
			break;
		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
			page = pvec.pages[i];
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				break;
			}
			if (!wbc->range_cyclic && page->index > end) {
				dout("end of range %p\n", page);
				done = 1;
				unlock_page(page);
				break;
			}
			if (next && (page->index != next)) {
				dout("not consecutive %p\n", page);
				unlock_page(page);
				break;
			}
			if (wbc->sync_mode != WB_SYNC_NONE) {
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
			}
			if ((snap_size && page_offset(page) > snap_size) ||
			    (!snap_size &&
			     page_offset(page) > i_size_read(inode))) {
				dout("%p page eof %llu\n", page, snap_size ?
				     snap_size : i_size_read(inode));
				done = 1;
				unlock_page(page);
				break;
			}
			if (PageWriteback(page)) {
				dout("%p under writeback\n", page);
				unlock_page(page);
				break;
			}

			/* only if matching snap context */
			pgsnapc = page_snap_context(page);
			if (pgsnapc->seq > snapc->seq) {
				dout("page snapc %p %lld > oldest %p %lld\n",
				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				unlock_page(page);
				if (!locked_pages)
					continue; /* keep looking for snap */
				break;
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				break;
			}

			/* ok */
			if (locked_pages == 0) {
				/* prepare async write request */
				offset = (u64) page_offset(page);
				len = wsize;
				req = ceph_osdc_new_request(&fsc->client->osdc,
					    &ci->i_layout,
					    ceph_vino(inode),
					    offset, &len,
					    CEPH_OSD_OP_WRITE,
					    CEPH_OSD_FLAG_WRITE |
						    CEPH_OSD_FLAG_ONDISK,
					    snapc, do_sync,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    &inode->i_mtime, true, 0);
				if (IS_ERR(req)) {
					rc = PTR_ERR(req);
					unlock_page(page);
					break;
				}

				max_pages = req->r_num_pages;

				alloc_page_vec(fsc, req);
				req->r_callback = writepages_finish;
				req->r_inode = inode;
			}

			/* note position of first page in pvec */
			if (first < 0)
				first = i;
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);

			writeback_stat =
			       atomic_long_inc_return(&fsc->writeback_count);
			if (writeback_stat > CONGESTION_ON_THRESH(
				    fsc->mount_options->congestion_kb)) {
				set_bdi_congested(&fsc->backing_dev_info,
						  BLK_RW_ASYNC);
			}

			set_page_writeback(page);
			req->r_pages[locked_pages] = page;
			locked_pages++;
			next = page->index + 1;
		}

		/* did we get anything? */
		if (!locked_pages)
			goto release_pvec_pages;
		if (i) {
			int j;
			BUG_ON(!locked_pages || first < 0);

			if (pvec_pages && i == pvec_pages &&
			    locked_pages < max_pages) {
				dout("reached end pvec, trying for more\n");
				pagevec_reinit(&pvec);
				goto get_more_pages;
			}

			/* shift unused pages over in the pvec...  we
			 * will need to release them below. */
			for (j = i; j < pvec_pages; j++) {
				dout(" pvec leftover page %p\n",
				     pvec.pages[j]);
				pvec.pages[j-i+first] = pvec.pages[j];
			}
			pvec.nr -= i-first;
		}

		/* submit the write */
		offset = req->r_pages[0]->index << PAGE_CACHE_SHIFT;
		len = min((snap_size ? snap_size : i_size_read(inode)) - offset,
			  (u64)locked_pages << PAGE_CACHE_SHIFT);
		dout("writepages got %d pages at %llu~%llu\n",
		     locked_pages, offset, len);

		/* revise final length, page count */
		req->r_num_pages = locked_pages;
		req->r_request_ops[0].extent.length = cpu_to_le64(len);
		req->r_request_ops[0].payload_len = cpu_to_le32(len);
		req->r_request->hdr.data_len = cpu_to_le32(len);

		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
		BUG_ON(rc);
		req = NULL;

		/* continue? */
		index = next;
		wbc->nr_to_write -= locked_pages;
		if (wbc->nr_to_write <= 0)
			done = 1;

release_pvec_pages:
		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
		     pvec.nr ? pvec.pages[0] : NULL);
		pagevec_release(&pvec);

		if (locked_pages && !done)
			goto retry;
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		should_loop = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	if (req)
		ceph_osdc_put_request(req);
	ceph_put_snap_context(snapc);
	dout("writepages done, rc = %d\n", rc);
	return rc;
}
/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}
/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 *
 * called with page locked.
 * return success with page locked,
 * or any failure (incl -EAGAIN) with page unlocked.
 */
static int ceph_update_writeable_page(struct file *file,
				      loff_t pos, unsigned len,
				      struct page *page)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	loff_t page_off = pos & PAGE_CACHE_MASK;
	int pos_in_page = pos & ~PAGE_CACHE_MASK;
	int end_in_page = pos_in_page + len;
	loff_t i_size;
	int r;
	struct ceph_snap_context *snapc, *oldest;

retry_locked:
	/* writepages currently holds page lock, but if we change that later, */
	wait_on_page_writeback(page);

	/* check snap context */
	BUG_ON(!ci->i_snap_realm);
	down_read(&mdsc->snap_rwsem);
	BUG_ON(!ci->i_snap_realm->cached_context);
	snapc = page_snap_context(page);
	if (snapc && snapc != ci->i_head_snapc) {
		/*
		 * this page is already dirty in another (older) snap
		 * context!  is it writeable now?
		 */
		oldest = get_oldest_context(inode, NULL);
		up_read(&mdsc->snap_rwsem);

		if (snapc->seq > oldest->seq) {
			ceph_put_snap_context(oldest);
			dout(" page %p snapc %p not current or oldest\n",
			     page, snapc);
			/*
			 * queue for writeback, and wait for snapc to
			 * be writeable or written
			 */
			snapc = ceph_get_snap_context(snapc);
			unlock_page(page);
			ceph_queue_writeback(inode);
			r = wait_event_interruptible(ci->i_cap_wq,
			       context_is_writeable_or_written(inode, snapc));
			ceph_put_snap_context(snapc);
			if (r == -ERESTARTSYS)
				return r;
			return -EAGAIN;
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping page lock) */
		dout(" page %p snapc %p not current, but oldest\n",
		     page, snapc);
		if (!clear_page_dirty_for_io(page))
			goto retry_locked;
		r = writepage_nounlock(page, NULL);
		if (r < 0)
			goto fail_nosnap;
		goto retry_locked;
	}

	if (PageUptodate(page)) {
		dout(" page %p already uptodate\n", page);
		return 0;
	}

	/* full page? */
	if (pos_in_page == 0 && len == PAGE_CACHE_SIZE)
		return 0;

	/* past end of file? */
	i_size = inode->i_size;   /* caller holds i_mutex */

	if (i_size + len > inode->i_sb->s_maxbytes) {
		/* file is too big */
		r = -EINVAL;
		goto fail;
	}

	if (page_off >= i_size ||
	    (pos_in_page == 0 && (pos+len) >= i_size &&
	     end_in_page - pos_in_page != PAGE_CACHE_SIZE)) {
		dout(" zeroing %p 0 - %d and %d - %d\n",
		     page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE);
		zero_user_segments(page,
				   0, pos_in_page,
				   end_in_page, PAGE_CACHE_SIZE);
		return 0;
	}

	/* we need to read it. */
	up_read(&mdsc->snap_rwsem);
	r = readpage_nounlock(file, page);
	if (r < 0)
		goto fail_nosnap;
	goto retry_locked;

fail:
	up_read(&mdsc->snap_rwsem);
fail_nosnap:
	unlock_page(page);
	return r;
}
/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file_inode(file);
	struct page *page;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	int r;

	do {
		/* get a page */
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			return -ENOMEM;
		*pagep = page;

		dout("write_begin file %p inode %p page %p %d~%d\n", file,
		     inode, page, (int)pos, (int)len);

		r = ceph_update_writeable_page(file, pos, len, page);
	} while (r == -EAGAIN);

	return r;
}
/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting and drop read lock on
 * mdsc->snap_rwsem.
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = file_inode(file);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	int check_cap = 0;

	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
	     inode, page, (int)pos, (int)copied, (int)len);

	/* zero the stale part of the page if we did a short copy */
	if (copied < len)
		zero_user_segment(page, from+copied, len);

	/* did file size increase? */
	/* (no need for i_size_read(); the caller holds i_mutex) */
	if (pos+copied > inode->i_size)
		check_cap = ceph_inode_set_size(inode, pos+copied);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	set_page_dirty(page);

	unlock_page(page);
	up_read(&mdsc->snap_rwsem);
	page_cache_release(page);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);

	return copied;
}
/*
 * we set .direct_IO to indicate direct io is supported, but since we
 * intercept O_DIRECT reads and writes early, this function should
 * never get called.
 */
static ssize_t ceph_direct_io(int rw, struct kiocb *iocb,
			      const struct iovec *iov,
			      loff_t pos, unsigned long nr_segs)
{
	WARN_ON(1);
	return -EINVAL;
}
const struct address_space_operations ceph_aops = {
	.readpage = ceph_readpage,
	.readpages = ceph_readpages,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.set_page_dirty = ceph_set_page_dirty,
	.invalidatepage = ceph_invalidatepage,
	.releasepage = ceph_releasepage,
	.direct_IO = ceph_direct_io,
};
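
/*
 * Editorial note: this ops table takes effect when an inode's mapping
 * is wired up to it at inode-fill time; in this era that assignment
 * lives elsewhere in the module (approximately
 * inode->i_mapping->a_ops = &ceph_aops in fs/ceph/inode.c).
 */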
/*
 * vm ops
 */

/*
 * Reuse write_begin here for simplicity.
 */
static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct page *page = vmf->page;
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	loff_t off = page_offset(page);
	loff_t size, len;
	int ret;

	/* Update time before taking page lock */
	file_update_time(vma->vm_file);

	size = i_size_read(inode);
	if (off + PAGE_CACHE_SIZE <= size)
		len = PAGE_CACHE_SIZE;
	else
		len = size & ~PAGE_CACHE_MASK;

	dout("page_mkwrite %p %llu~%llu page %p idx %lu\n", inode,
	     off, len, page, page->index);

	lock_page(page);

	ret = VM_FAULT_NOPAGE;
	if ((off > size) ||
	    (page->mapping != inode->i_mapping))
		goto out;

	ret = ceph_update_writeable_page(vma->vm_file, off, len, page);
	if (ret == 0) {
		/* success.  we'll keep the page locked. */
		set_page_dirty(page);
		up_read(&mdsc->snap_rwsem);
		ret = VM_FAULT_LOCKED;
	} else {
		if (ret == -ENOMEM)
			ret = VM_FAULT_OOM;
		else
			ret = VM_FAULT_SIGBUS;
	}
out:
	dout("page_mkwrite %p %llu~%llu = %d\n", inode, off, len, ret);
	if (ret != VM_FAULT_LOCKED)
		unlock_page(page);
	return ret;
}
static struct vm_operations_struct ceph_vmops = {
	.fault = filemap_fault,
	.page_mkwrite = ceph_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};
int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ceph_vmops;
	return 0;
}
;