ceph: scattered page writeback
[deliverable/linux.git] / fs / ceph / file.c
3d14c5d2 1#include <linux/ceph/ceph_debug.h>
124e68e7 2
3d14c5d2 3#include <linux/module.h>
124e68e7 4#include <linux/sched.h>
5a0e3ad6 5#include <linux/slab.h>
124e68e7 6#include <linux/file.h>
5ef50c3b 7#include <linux/mount.h>
8#include <linux/namei.h>
9#include <linux/writeback.h>
ad7a60de 10#include <linux/falloc.h>
11
12#include "super.h"
13#include "mds_client.h"
99ccbd22 14#include "cache.h"
15
16/*
17 * Ceph file operations
18 *
19 * Implement basic open/close functionality, and implement
20 * read/write.
21 *
22 * We implement three modes of file I/O:
23 * - buffered uses the generic_file_aio_{read,write} helpers
24 *
25 * - synchronous is used when there is multi-client read/write
26 * sharing, avoids the page cache, and synchronously waits for an
27 * ack from the OSD.
28 *
29 * - direct io takes the variant of the sync path that references
30 * user pages directly.
31 *
32 * fsync() flushes and waits on dirty pages, but just queues metadata
33 * for writeback: since the MDS can recover size and mtime there is no
34 * need to wait for MDS acknowledgement.
35 */
36
37/*
38 * Calculate the length sum of direct io vectors that can
39 * be combined into one page vector.
40 */
41static size_t dio_get_pagev_size(const struct iov_iter *it)
42{
43 const struct iovec *iov = it->iov;
44 const struct iovec *iovend = iov + it->nr_segs;
45 size_t size;
46
47 size = iov->iov_len - it->iov_offset;
48 /*
49 * An iov can be page vectored when both the current tail
50 * and the next base are page aligned.
51 */
52 while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) &&
53 (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) {
54 size += iov->iov_len;
55 }
56 dout("dio_get_pagevlen len = %zu\n", size);
57 return size;
58}
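/*
 * Worked example (assuming 4 KB pages): with iov_offset 0 and iovecs
 *   { base = 0x...0000, len = 4096 }, { base = 0x...0000, len = 8192 },
 *   { base = 0x...0123, len = 512 }
 * the first segment's tail and the second segment's base are page
 * aligned, so the two are combined (4096 + 8192 = 12288); the third
 * base is unaligned, so the scan stops and 12288 is returned.
 */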
59
60/*
61 * Allocate a page vector based on (@it, @nbytes).
62 * The return value is the tuple describing a page vector,
63 * that is (@pages, @page_align, @num_pages).
64 */
65static struct page **
66dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
67 size_t *page_align, int *num_pages)
68{
69 struct iov_iter tmp_it = *it;
70 size_t align;
71 struct page **pages;
72 int ret = 0, idx, npages;
73
74 align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
75 (PAGE_SIZE - 1);
76 npages = calc_pages_for(align, nbytes);
77 pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL);
78 if (!pages) {
79 pages = vmalloc(sizeof(*pages) * npages);
80 if (!pages)
81 return ERR_PTR(-ENOMEM);
82 }
83
84 for (idx = 0; idx < npages; ) {
85 size_t start;
86 ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes,
87 npages - idx, &start);
88 if (ret < 0)
89 goto fail;
90
91 iov_iter_advance(&tmp_it, ret);
92 nbytes -= ret;
93 idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE;
94 }
95
96 BUG_ON(nbytes != 0);
97 *num_pages = npages;
98 *page_align = align;
99 dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align);
100 return pages;
101fail:
102 ceph_put_page_vector(pages, idx, false);
103 return ERR_PTR(ret);
104}
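/*
 * Worked example (assuming 4 KB pages): for a user buffer ending in
 * ...234 and nbytes = 8192, page_align is 0x234 and
 * calc_pages_for(0x234, 8192) yields 3 pages, since the 8192 bytes
 * span three pages once the leading in-page offset is included.
 */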
105
106/*
107 * Prepare an open request. Preallocate ceph_cap to avoid an
108 * inopportune ENOMEM later.
109 */
110static struct ceph_mds_request *
111prepare_open_request(struct super_block *sb, int flags, int create_mode)
112{
113 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
114 struct ceph_mds_client *mdsc = fsc->mdsc;
115 struct ceph_mds_request *req;
116 int want_auth = USE_ANY_MDS;
117 int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
118
119 if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
120 want_auth = USE_AUTH_MDS;
121
122 req = ceph_mdsc_create_request(mdsc, op, want_auth);
123 if (IS_ERR(req))
124 goto out;
125 req->r_fmode = ceph_flags_to_mode(flags);
126 req->r_args.open.flags = cpu_to_le32(flags);
127 req->r_args.open.mode = cpu_to_le32(create_mode);
128out:
129 return req;
130}
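/*
 * Note: opens that can modify the file (O_WRONLY, O_RDWR, O_CREAT,
 * O_TRUNC) are sent to the auth MDS (USE_AUTH_MDS above); read-only
 * opens may be handled by any MDS (USE_ANY_MDS).
 */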
131
132/*
133 * initialize private struct file data.
134 * if we fail, clean up by dropping fmode reference on the ceph_inode
135 */
136static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
137{
138 struct ceph_file_info *cf;
139 int ret = 0;
140 struct ceph_inode_info *ci = ceph_inode(inode);
141 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
142 struct ceph_mds_client *mdsc = fsc->mdsc;
143
144 switch (inode->i_mode & S_IFMT) {
145 case S_IFREG:
 146 /* The first file open request creates the fscache cookie; we want to
 147 * keep this cookie around for the lifetime of the inode so as not to
 148 * have to worry about fscache register / revoke / operation
 149 * races.
 150 *
 151 * Also, if we know the operation is going to invalidate data
 152 * (non-readonly) just nuke the cache right away.
 153 */
154 ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
155 if ((fmode & CEPH_FILE_MODE_WR))
156 ceph_fscache_invalidate(inode);
157 case S_IFDIR:
158 dout("init_file %p %p 0%o (regular)\n", inode, file,
159 inode->i_mode);
687265e5 160 cf = kmem_cache_alloc(ceph_file_cachep, GFP_KERNEL | __GFP_ZERO);
161 if (cf == NULL) {
162 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
163 return -ENOMEM;
164 }
165 cf->fmode = fmode;
166 cf->next_offset = 2;
fdd4e158 167 cf->readdir_cache_idx = -1;
168 file->private_data = cf;
169 BUG_ON(inode->i_fop->release != ceph_release);
170 break;
171
172 case S_IFLNK:
173 dout("init_file %p %p 0%o (symlink)\n", inode, file,
174 inode->i_mode);
175 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
176 break;
177
178 default:
179 dout("init_file %p %p 0%o (special)\n", inode, file,
180 inode->i_mode);
181 /*
182 * we need to drop the open ref now, since we don't
183 * have .release set to ceph_release.
184 */
185 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
186 BUG_ON(inode->i_fop->release == ceph_release);
187
188 /* call the proper open fop */
189 ret = inode->i_fop->open(inode, file);
190 }
191 return ret;
192}
193
194/*
195 * If we already have the requisite capabilities, we can satisfy
196 * the open request locally (no need to request new caps from the
197 * MDS). We do, however, need to inform the MDS (asynchronously)
198 * if our wanted caps set expands.
199 */
200int ceph_open(struct inode *inode, struct file *file)
201{
202 struct ceph_inode_info *ci = ceph_inode(inode);
203 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
204 struct ceph_mds_client *mdsc = fsc->mdsc;
205 struct ceph_mds_request *req;
206 struct ceph_file_info *cf = file->private_data;
207 int err;
208 int flags, fmode, wanted;
209
210 if (cf) {
211 dout("open file %p is already opened\n", file);
212 return 0;
213 }
214
215 /* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */
216 flags = file->f_flags & ~(O_CREAT|O_EXCL);
217 if (S_ISDIR(inode->i_mode))
218 flags = O_DIRECTORY; /* mds likes to know */
219
220 dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
221 ceph_vinop(inode), file, flags, file->f_flags);
222 fmode = ceph_flags_to_mode(flags);
223 wanted = ceph_caps_for_mode(fmode);
224
225 /* snapped files are read-only */
226 if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
227 return -EROFS;
228
229 /* trivially open snapdir */
230 if (ceph_snap(inode) == CEPH_SNAPDIR) {
be655596 231 spin_lock(&ci->i_ceph_lock);
124e68e7 232 __ceph_get_fmode(ci, fmode);
be655596 233 spin_unlock(&ci->i_ceph_lock);
234 return ceph_init_file(inode, file, fmode);
235 }
236
237 /*
238 * No need to block if we have caps on the auth MDS (for
239 * write) or any MDS (for read). Update wanted set
240 * asynchronously.
241 */
be655596 242 spin_lock(&ci->i_ceph_lock);
243 if (__ceph_is_any_real_caps(ci) &&
244 (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
245 int mds_wanted = __ceph_caps_mds_wanted(ci);
246 int issued = __ceph_caps_issued(ci, NULL);
247
248 dout("open %p fmode %d want %s issued %s using existing\n",
249 inode, fmode, ceph_cap_string(wanted),
250 ceph_cap_string(issued));
251 __ceph_get_fmode(ci, fmode);
be655596 252 spin_unlock(&ci->i_ceph_lock);
253
254 /* adjust wanted? */
255 if ((issued & wanted) != wanted &&
256 (mds_wanted & wanted) != wanted &&
257 ceph_snap(inode) != CEPH_SNAPDIR)
258 ceph_check_caps(ci, 0, NULL);
259
260 return ceph_init_file(inode, file, fmode);
261 } else if (ceph_snap(inode) != CEPH_NOSNAP &&
262 (ci->i_snap_caps & wanted) == wanted) {
263 __ceph_get_fmode(ci, fmode);
be655596 264 spin_unlock(&ci->i_ceph_lock);
265 return ceph_init_file(inode, file, fmode);
266 }
99ccbd22 267
be655596 268 spin_unlock(&ci->i_ceph_lock);
269
270 dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
271 req = prepare_open_request(inode->i_sb, flags, 0);
272 if (IS_ERR(req)) {
273 err = PTR_ERR(req);
274 goto out;
275 }
276 req->r_inode = inode;
277 ihold(inode);
99ccbd22 278
124e68e7 279 req->r_num_caps = 1;
e36d571d 280 err = ceph_mdsc_do_request(mdsc, NULL, req);
281 if (!err)
282 err = ceph_init_file(inode, file, req->r_fmode);
283 ceph_mdsc_put_request(req);
284 dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
285out:
286 return err;
287}
288
289
290/*
291 * Do a lookup + open with a single request. If we get a non-existent
292 * file or symlink, return 1 so the VFS can retry.
124e68e7 293 */
5ef50c3b 294int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
30d90494 295 struct file *file, unsigned flags, umode_t mode,
d9585277 296 int *opened)
124e68e7 297{
298 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
299 struct ceph_mds_client *mdsc = fsc->mdsc;
124e68e7 300 struct ceph_mds_request *req;
5ef50c3b 301 struct dentry *dn;
b1ee94aa 302 struct ceph_acls_info acls = {};
124e68e7 303 int err;
124e68e7 304
305 dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
306 dir, dentry, dentry,
307 d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
308
309 if (dentry->d_name.len > NAME_MAX)
310 return -ENAMETOOLONG;
311
312 err = ceph_init_dentry(dentry);
313 if (err < 0)
314 return err;
124e68e7 315
316 if (flags & O_CREAT) {
317 err = ceph_pre_init_acls(dir, &mode, &acls);
318 if (err < 0)
319 return err;
320 }
321
322 /* do the open */
323 req = prepare_open_request(dir->i_sb, flags, mode);
324 if (IS_ERR(req)) {
325 err = PTR_ERR(req);
326 goto out_acl;
327 }
328 req->r_dentry = dget(dentry);
329 req->r_num_caps = 2;
330 if (flags & O_CREAT) {
331 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
332 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
333 if (acls.pagelist) {
334 req->r_pagelist = acls.pagelist;
335 acls.pagelist = NULL;
336 }
337 }
338 req->r_locked_dir = dir; /* caller holds dir->i_mutex */
339 err = ceph_mdsc_do_request(mdsc,
340 (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
341 req);
bf91c315 342 err = ceph_handle_snapdir(req, dentry, err);
79aec984 343 if (err)
b1ee94aa 344 goto out_req;
79aec984 345
a43137f7 346 if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
124e68e7 347 err = ceph_handle_notrace_create(dir, dentry);
2d83bde9 348
349 if (d_unhashed(dentry)) {
350 dn = ceph_finish_lookup(req, dentry, err);
351 if (IS_ERR(dn))
352 err = PTR_ERR(dn);
353 } else {
354 /* we were given a hashed negative dentry */
355 dn = NULL;
356 }
357 if (err)
b1ee94aa 358 goto out_req;
2b0143b5 359 if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
360 /* make vfs retry on splice, ENOENT, or symlink */
361 dout("atomic_open finish_no_open on dn %p\n", dn);
362 err = finish_no_open(file, dn);
363 } else {
364 dout("atomic_open finish_open on dn %p\n", dn);
6e8575fa 365 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
2b0143b5 366 ceph_init_inode_acls(d_inode(dentry), &acls);
367 *opened |= FILE_CREATED;
368 }
369 err = finish_open(file, dentry, ceph_open, opened);
370 }
b1ee94aa 371out_req:
372 if (!req->r_err && req->r_target_inode)
373 ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
5ef50c3b 374 ceph_mdsc_put_request(req);
375out_acl:
376 ceph_release_acls_info(&acls);
5ef50c3b 377 dout("atomic_open result=%d\n", err);
d9585277 378 return err;
379}
380
381int ceph_release(struct inode *inode, struct file *file)
382{
383 struct ceph_inode_info *ci = ceph_inode(inode);
384 struct ceph_file_info *cf = file->private_data;
385
386 dout("release inode %p file %p\n", inode, file);
387 ceph_put_fmode(ci, cf->fmode);
388 if (cf->last_readdir)
389 ceph_mdsc_put_request(cf->last_readdir);
390 kfree(cf->last_name);
391 kfree(cf->dir_info);
124e68e7 392 kmem_cache_free(ceph_file_cachep, cf);
393
394 /* wake up anyone waiting for caps on this inode */
03066f23 395 wake_up_all(&ci->i_cap_wq);
396 return 0;
397}
398
83701246 399enum {
400 HAVE_RETRIED = 1,
401 CHECK_EOF = 2,
402 READ_INLINE = 3,
403};
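/*
 * These values travel through the retry_op argument used by
 * ceph_read_iter() below: CHECK_EOF requests another pass of the sync
 * read after re-checking i_size, READ_INLINE switches to fetching
 * inline data via getattr, and HAVE_RETRIED marks a pass that has
 * already been retried.
 */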
404
405/*
406 * Read a range of bytes striped over one or more objects. Iterate over
407 * objects we stripe over. (That's not atomic, but good enough for now.)
408 *
409 * If we get a short result from the OSD, check against i_size; we need to
410 * only return a short read to the caller if we hit EOF.
411 */
412static int striped_read(struct inode *inode,
413 u64 off, u64 len,
6a026589 414 struct page **pages, int num_pages,
c8fe9b17 415 int *checkeof)
124e68e7 416{
3d14c5d2 417 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
124e68e7 418 struct ceph_inode_info *ci = ceph_inode(inode);
688bac46 419 u64 pos, this_len, left;
99c88e69 420 loff_t i_size;
421 int page_align, pages_left;
422 int read, ret;
124e68e7 423 struct page **page_pos;
424 bool hit_stripe, was_short;
425
426 /*
427 * we may need to do multiple reads. not atomic, unfortunately.
428 */
429 pos = off;
430 left = len;
431 page_pos = pages;
432 pages_left = num_pages;
433 read = 0;
434
435more:
c8fe9b17 436 page_align = pos & ~PAGE_MASK;
124e68e7 437 this_len = left;
3d14c5d2 438 ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
439 &ci->i_layout, pos, &this_len,
440 ci->i_truncate_seq,
441 ci->i_truncate_size,
b7495fc2 442 page_pos, pages_left, page_align);
443 if (ret == -ENOENT)
444 ret = 0;
445 hit_stripe = this_len < left;
446 was_short = ret >= 0 && ret < this_len;
688bac46 447 dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
448 ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
449
99c88e69 450 i_size = i_size_read(inode);
02ae66d8 451 if (ret >= 0) {
452 int didpages;
453 if (was_short && (pos + ret < i_size)) {
454 int zlen = min(this_len - ret, i_size - pos - ret);
c8fe9b17 455 int zoff = (off & ~PAGE_MASK) + read + ret;
02ae66d8 456 dout(" zero gap %llu to %llu\n",
457 pos + ret, pos + ret + zlen);
458 ceph_zero_page_vector_range(zoff, zlen, pages);
459 ret += zlen;
124e68e7 460 }
02ae66d8 461
462 didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
463 pos += ret;
464 read = pos - off;
465 left -= ret;
466 page_pos += didpages;
467 pages_left -= didpages;
468
02ae66d8 469 /* hit a stripe boundary and need to continue */
99c88e69 470 if (left && hit_stripe && pos < i_size)
471 goto more;
472 }
473
ee7289bf 474 if (read > 0) {
02ae66d8 475 ret = read;
c3cd6283 476 /* did we bounce off eof? */
99c88e69 477 if (pos + left > i_size)
83701246 478 *checkeof = CHECK_EOF;
479 }
480
481 dout("striped_read returns %d\n", ret);
482 return ret;
483}
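/*
 * Example of the loop above (assuming the default layout of 4 MB
 * objects with no striping): a 6 MB read at offset 0 is trimmed to
 * 4 MB by the osd client, flagged as HITSTRIPE, and a second 2 MB
 * read is issued for the remainder.
 */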
484
485/*
486 * Completely synchronous read and write methods. Direct from __user
487 * buffer to osd, or directly to user pages (if O_DIRECT).
488 *
489 * If the read spans object boundary, just do multiple reads.
490 */
8eb4efb0 491static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
492 int *checkeof)
124e68e7 493{
8eb4efb0 494 struct file *file = iocb->ki_filp;
496ad9aa 495 struct inode *inode = file_inode(file);
124e68e7 496 struct page **pages;
8eb4efb0 497 u64 off = iocb->ki_pos;
ab226e21 498 int num_pages, ret;
2b777c9d 499 size_t len = iov_iter_count(i);
124e68e7 500
8eb4efb0 501 dout("sync_read on file %p %llu~%u %s\n", file, off,
502 (unsigned)len,
124e68e7 503 (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
504
505 if (!len)
506 return 0;
507 /*
508 * flush any page cache pages in this range. this
509 * will make concurrent normal and sync io slow,
510 * but it will at least behave sensibly when they are
511 * in sequence.
512 */
8eb4efb0 513 ret = filemap_write_and_wait_range(inode->i_mapping, off,
514 off + len);
29065a51 515 if (ret < 0)
8eb4efb0 516 return ret;
29065a51 517
518 num_pages = calc_pages_for(off, len);
519 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
520 if (IS_ERR(pages))
521 return PTR_ERR(pages);
522 ret = striped_read(inode, off, len, pages,
523 num_pages, checkeof);
524 if (ret > 0) {
525 int l, k = 0;
526 size_t left = ret;
527
528 while (left) {
529 size_t page_off = off & ~PAGE_MASK;
530 size_t copy = min_t(size_t, left,
531 PAGE_SIZE - page_off);
532 l = copy_page_to_iter(pages[k++], page_off, copy, i);
533 off += l;
534 left -= l;
535 if (l < copy)
8eb4efb0 536 break;
537 }
8eb4efb0 538 }
c8fe9b17 539 ceph_release_page_vector(pages, num_pages);
124e68e7 540
8eb4efb0 541 if (off > iocb->ki_pos) {
542 ret = off - iocb->ki_pos;
543 iocb->ki_pos = off;
544 }
124e68e7 545
546 dout("sync_read result %d\n", ret);
547 return ret;
548}
549
550struct ceph_aio_request {
551 struct kiocb *iocb;
552 size_t total_len;
553 int write;
554 int error;
555 struct list_head osd_reqs;
556 unsigned num_reqs;
557 atomic_t pending_reqs;
5be0389d 558 struct timespec mtime;
559 struct ceph_cap_flush *prealloc_cf;
560};
561
562struct ceph_aio_work {
563 struct work_struct work;
564 struct ceph_osd_request *req;
565};
566
567static void ceph_aio_retry_work(struct work_struct *work);
568
569static void ceph_aio_complete(struct inode *inode,
570 struct ceph_aio_request *aio_req)
571{
572 struct ceph_inode_info *ci = ceph_inode(inode);
573 int ret;
574
575 if (!atomic_dec_and_test(&aio_req->pending_reqs))
576 return;
577
578 ret = aio_req->error;
579 if (!ret)
580 ret = aio_req->total_len;
581
582 dout("ceph_aio_complete %p rc %d\n", inode, ret);
583
584 if (ret >= 0 && aio_req->write) {
585 int dirty;
586
587 loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
588 if (endoff > i_size_read(inode)) {
589 if (ceph_inode_set_size(inode, endoff))
590 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
591 }
592
593 spin_lock(&ci->i_ceph_lock);
594 ci->i_inline_version = CEPH_INLINE_NONE;
595 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
596 &aio_req->prealloc_cf);
597 spin_unlock(&ci->i_ceph_lock);
598 if (dirty)
599 __mark_inode_dirty(inode, dirty);
600
601 }
602
603 ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
604 CEPH_CAP_FILE_RD));
605
606 aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);
607
608 ceph_free_cap_flush(aio_req->prealloc_cf);
609 kfree(aio_req);
610}
611
612static void ceph_aio_complete_req(struct ceph_osd_request *req,
613 struct ceph_msg *msg)
614{
615 int rc = req->r_result;
616 struct inode *inode = req->r_inode;
617 struct ceph_aio_request *aio_req = req->r_priv;
618 struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
619 int num_pages = calc_pages_for((u64)osd_data->alignment,
620 osd_data->length);
621
622 dout("ceph_aio_complete_req %p rc %d bytes %llu\n",
623 inode, rc, osd_data->length);
624
625 if (rc == -EOLDSNAPC) {
626 struct ceph_aio_work *aio_work;
627 BUG_ON(!aio_req->write);
628
629 aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
630 if (aio_work) {
631 INIT_WORK(&aio_work->work, ceph_aio_retry_work);
632 aio_work->req = req;
633 queue_work(ceph_inode_to_client(inode)->wb_wq,
634 &aio_work->work);
635 return;
636 }
637 rc = -ENOMEM;
638 } else if (!aio_req->write) {
639 if (rc == -ENOENT)
640 rc = 0;
641 if (rc >= 0 && osd_data->length > rc) {
642 int zoff = osd_data->alignment + rc;
643 int zlen = osd_data->length - rc;
 644 /*
 645 * If the read is satisfied by a single OSD request,
 646 * it can extend past EOF. Otherwise the read is
 647 * within i_size.
 648 */
649 if (aio_req->num_reqs == 1) {
650 loff_t i_size = i_size_read(inode);
651 loff_t endoff = aio_req->iocb->ki_pos + rc;
652 if (endoff < i_size)
653 zlen = min_t(size_t, zlen,
654 i_size - endoff);
655 aio_req->total_len = rc + zlen;
656 }
657
658 if (zlen > 0)
659 ceph_zero_page_vector_range(zoff, zlen,
660 osd_data->pages);
661 }
662 }
663
664 ceph_put_page_vector(osd_data->pages, num_pages, false);
665 ceph_osdc_put_request(req);
666
667 if (rc < 0)
668 cmpxchg(&aio_req->error, 0, rc);
669
670 ceph_aio_complete(inode, aio_req);
671 return;
672}
673
674static void ceph_aio_retry_work(struct work_struct *work)
675{
676 struct ceph_aio_work *aio_work =
677 container_of(work, struct ceph_aio_work, work);
678 struct ceph_osd_request *orig_req = aio_work->req;
679 struct ceph_aio_request *aio_req = orig_req->r_priv;
680 struct inode *inode = orig_req->r_inode;
681 struct ceph_inode_info *ci = ceph_inode(inode);
682 struct ceph_snap_context *snapc;
683 struct ceph_osd_request *req;
684 int ret;
685
686 spin_lock(&ci->i_ceph_lock);
687 if (__ceph_have_pending_cap_snap(ci)) {
688 struct ceph_cap_snap *capsnap =
689 list_last_entry(&ci->i_cap_snaps,
690 struct ceph_cap_snap,
691 ci_item);
692 snapc = ceph_get_snap_context(capsnap->context);
693 } else {
694 BUG_ON(!ci->i_head_snapc);
695 snapc = ceph_get_snap_context(ci->i_head_snapc);
696 }
697 spin_unlock(&ci->i_ceph_lock);
698
699 req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
700 false, GFP_NOFS);
701 if (!req) {
702 ret = -ENOMEM;
703 req = orig_req;
704 goto out;
705 }
706
707 req->r_flags = CEPH_OSD_FLAG_ORDERSNAP |
708 CEPH_OSD_FLAG_ONDISK |
709 CEPH_OSD_FLAG_WRITE;
710 req->r_base_oloc = orig_req->r_base_oloc;
711 req->r_base_oid = orig_req->r_base_oid;
712
713 req->r_ops[0] = orig_req->r_ops[0];
714 osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
715
716 ceph_osdc_build_request(req, req->r_ops[0].extent.offset,
717 snapc, CEPH_NOSNAP, &aio_req->mtime);
718
719 ceph_osdc_put_request(orig_req);
720
721 req->r_callback = ceph_aio_complete_req;
722 req->r_inode = inode;
723 req->r_priv = aio_req;
724
725 ret = ceph_osdc_start_request(req->r_osdc, req, false);
726out:
727 if (ret < 0) {
728 req->r_result = ret;
729 ceph_aio_complete_req(req, NULL);
730 }
731
db6aed70 732 ceph_put_snap_context(snapc);
733 kfree(aio_work);
734}
735
124e68e7 736/*
737 * Write commit request unsafe callback, called to tell us when a
738 * request is unsafe (that is, in flight--has been handed to the
739 * messenger to send to its target osd). It is called again when
740 * we've received a response message indicating the request is
741 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
742 * is completed early (and unsuccessfully) due to a timeout or
743 * interrupt.
744 *
745 * This is used if we requested both an ACK and ONDISK commit reply
746 * from the OSD.
124e68e7 747 */
26be8808 748static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
749{
750 struct ceph_inode_info *ci = ceph_inode(req->r_inode);
751
752 dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
753 unsafe ? "un" : "");
754 if (unsafe) {
755 ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
756 spin_lock(&ci->i_unsafe_lock);
757 list_add_tail(&req->r_unsafe_item,
758 &ci->i_unsafe_writes);
759 spin_unlock(&ci->i_unsafe_lock);
760 } else {
761 spin_lock(&ci->i_unsafe_lock);
762 list_del_init(&req->r_unsafe_item);
763 spin_unlock(&ci->i_unsafe_lock);
764 ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
765 }
766}
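/*
 * While a request sits on the i_unsafe_writes list the inode holds an
 * extra Fw cap reference (taken above), so the write caps are not
 * released back to the MDS until the OSD reports the data on disk.
 */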
767
e8344e66 768
e8344e66 769static ssize_t
770ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
771 struct ceph_snap_context *snapc,
772 struct ceph_cap_flush **pcf)
124e68e7 773{
e8344e66 774 struct file *file = iocb->ki_filp;
496ad9aa 775 struct inode *inode = file_inode(file);
124e68e7 776 struct ceph_inode_info *ci = ceph_inode(inode);
3d14c5d2 777 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
acead002 778 struct ceph_vino vino;
779 struct ceph_osd_request *req;
780 struct page **pages;
781 struct ceph_aio_request *aio_req = NULL;
782 int num_pages = 0;
124e68e7 783 int flags;
784 int ret;
785 struct timespec mtime = CURRENT_TIME;
786 size_t count = iov_iter_count(iter);
787 loff_t pos = iocb->ki_pos;
788 bool write = iov_iter_rw(iter) == WRITE;
124e68e7 789
c8fe9b17 790 if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
791 return -EROFS;
792
793 dout("sync_direct_read_write (%s) on file %p %lld~%u\n",
794 (write ? "write" : "read"), file, pos, (unsigned)count);
124e68e7 795
e8344e66 796 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
797 if (ret < 0)
798 return ret;
799
800 if (write) {
801 ret = invalidate_inode_pages2_range(inode->i_mapping,
802 pos >> PAGE_CACHE_SHIFT,
803 (pos + count) >> PAGE_CACHE_SHIFT);
804 if (ret < 0)
805 dout("invalidate_inode_pages2_range returned %d\n", ret);
29065a51 806
807 flags = CEPH_OSD_FLAG_ORDERSNAP |
808 CEPH_OSD_FLAG_ONDISK |
809 CEPH_OSD_FLAG_WRITE;
810 } else {
811 flags = CEPH_OSD_FLAG_READ;
812 }
124e68e7 813
814 while (iov_iter_count(iter) > 0) {
815 u64 size = dio_get_pagev_size(iter);
816 size_t start = 0;
817 ssize_t len;
e8344e66 818
e8344e66 819 vino = ceph_vino(inode);
820 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
821 vino, pos, &size, 0,
822 /*include a 'startsync' command*/
823 write ? 2 : 1,
824 write ? CEPH_OSD_OP_WRITE :
825 CEPH_OSD_OP_READ,
826 flags, snapc,
e8344e66 827 ci->i_truncate_seq,
828 ci->i_truncate_size,
829 false);
830 if (IS_ERR(req)) {
831 ret = PTR_ERR(req);
eab87235 832 break;
e8344e66 833 }
124e68e7 834
835 len = size;
836 pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
b5b98989 837 if (IS_ERR(pages)) {
64c31311 838 ceph_osdc_put_request(req);
b5b98989 839 ret = PTR_ERR(pages);
64c31311 840 break;
841 }
842
 843 /*
 844 * To simplify error handling, allow AIO only when the IO is within
 845 * i_size or can be satisfied by a single OSD request.
 846 */
847 if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
848 (len == count || pos + count <= i_size_read(inode))) {
849 aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
850 if (aio_req) {
851 aio_req->iocb = iocb;
852 aio_req->write = write;
853 INIT_LIST_HEAD(&aio_req->osd_reqs);
854 if (write) {
5be0389d 855 aio_req->mtime = mtime;
856 swap(aio_req->prealloc_cf, *pcf);
857 }
858 }
859 /* ignore error */
860 }
861
862 if (write) {
863 /*
864 * throw out any page cache pages in this range. this
865 * may block.
866 */
867 truncate_inode_pages_range(inode->i_mapping, pos,
868 (pos+len) | (PAGE_CACHE_SIZE - 1));
869
870 osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
871 }
872
873
874 osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
875 false, false);
e8344e66 876
e8344e66 877 ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
878
879 if (aio_req) {
880 aio_req->total_len += len;
881 aio_req->num_reqs++;
882 atomic_inc(&aio_req->pending_reqs);
883
884 req->r_callback = ceph_aio_complete_req;
885 req->r_inode = inode;
886 req->r_priv = aio_req;
887 list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);
888
889 pos += len;
890 iov_iter_advance(iter, len);
891 continue;
892 }
893
894 ret = ceph_osdc_start_request(req->r_osdc, req, false);
e8344e66 895 if (!ret)
896 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
897
898 size = i_size_read(inode);
899 if (!write) {
900 if (ret == -ENOENT)
901 ret = 0;
902 if (ret >= 0 && ret < len && pos + ret < size) {
903 int zlen = min_t(size_t, len - ret,
904 size - pos - ret);
905 ceph_zero_page_vector_range(start + ret, zlen,
906 pages);
907 ret += zlen;
908 }
909 if (ret >= 0)
910 len = ret;
911 }
912
e8344e66 913 ceph_put_page_vector(pages, num_pages, false);
914
e8344e66 915 ceph_osdc_put_request(req);
c8fe9b17 916 if (ret < 0)
e8344e66 917 break;
64c31311 918
919 pos += len;
920 iov_iter_advance(iter, len);
921
922 if (!write && pos >= size)
e8344e66 923 break;
64c31311 924
925 if (write && pos > size) {
926 if (ceph_inode_set_size(inode, pos))
927 ceph_check_caps(ceph_inode(inode),
928 CHECK_CAPS_AUTHONLY,
929 NULL);
930 }
e8344e66 931 }
932
933 if (aio_req) {
934 if (aio_req->num_reqs == 0) {
935 kfree(aio_req);
936 return ret;
937 }
938
939 ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
940 CEPH_CAP_FILE_RD);
941
942 while (!list_empty(&aio_req->osd_reqs)) {
943 req = list_first_entry(&aio_req->osd_reqs,
944 struct ceph_osd_request,
945 r_unsafe_item);
946 list_del_init(&req->r_unsafe_item);
947 if (ret >= 0)
948 ret = ceph_osdc_start_request(req->r_osdc,
949 req, false);
950 if (ret < 0) {
951 req->r_result = ret;
952 ceph_aio_complete_req(req, NULL);
953 }
954 }
955 return -EIOCBQUEUED;
956 }
957
958 if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
959 ret = pos - iocb->ki_pos;
e8344e66 960 iocb->ki_pos = pos;
e8344e66 961 }
962 return ret;
963}
964
e8344e66 965/*
966 * Synchronous write, straight from __user pointer or user pages.
967 *
 968 * If the write spans an object boundary, just do multiple writes. (For a
969 * correct atomic write, we should e.g. take write locks on all
970 * objects, rollback on failure, etc.)
971 */
06fee30f 972static ssize_t
973ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
974 struct ceph_snap_context *snapc)
e8344e66 975{
976 struct file *file = iocb->ki_filp;
977 struct inode *inode = file_inode(file);
978 struct ceph_inode_info *ci = ceph_inode(inode);
979 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
e8344e66 980 struct ceph_vino vino;
981 struct ceph_osd_request *req;
982 struct page **pages;
983 u64 len;
984 int num_pages;
985 int written = 0;
986 int flags;
987 int check_caps = 0;
988 int ret;
989 struct timespec mtime = CURRENT_TIME;
4908b822 990 size_t count = iov_iter_count(from);
e8344e66 991
992 if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
993 return -EROFS;
994
995 dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);
996
997 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
998 if (ret < 0)
999 return ret;
1000
1001 ret = invalidate_inode_pages2_range(inode->i_mapping,
1002 pos >> PAGE_CACHE_SHIFT,
1003 (pos + count) >> PAGE_CACHE_SHIFT);
1004 if (ret < 0)
1005 dout("invalidate_inode_pages2_range returned %d\n", ret);
1006
1007 flags = CEPH_OSD_FLAG_ORDERSNAP |
1008 CEPH_OSD_FLAG_ONDISK |
1009 CEPH_OSD_FLAG_WRITE |
1010 CEPH_OSD_FLAG_ACK;
1011
4908b822 1012 while ((len = iov_iter_count(from)) > 0) {
e8344e66 1013 size_t left;
1014 int n;
1015
e8344e66 1016 vino = ceph_vino(inode);
1017 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
715e4cd4 1018 vino, pos, &len, 0, 1,
e8344e66 1019 CEPH_OSD_OP_WRITE, flags, snapc,
1020 ci->i_truncate_seq,
1021 ci->i_truncate_size,
1022 false);
1023 if (IS_ERR(req)) {
1024 ret = PTR_ERR(req);
eab87235 1025 break;
e8344e66 1026 }
1027
1028 /*
1029 * write from beginning of first page,
1030 * regardless of io alignment
1031 */
1032 num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1033
687265e5 1034 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1035 if (IS_ERR(pages)) {
1036 ret = PTR_ERR(pages);
1037 goto out;
1038 }
e8344e66 1039
1040 left = len;
1041 for (n = 0; n < num_pages; n++) {
125d725c 1042 size_t plen = min_t(size_t, left, PAGE_SIZE);
4908b822 1043 ret = copy_page_from_iter(pages[n], 0, plen, from);
e8344e66 1044 if (ret != plen) {
1045 ret = -EFAULT;
1046 break;
1047 }
1048 left -= ret;
e8344e66 1049 }
1050
1051 if (ret < 0) {
1052 ceph_release_page_vector(pages, num_pages);
1053 goto out;
1054 }
1055
e8344e66 1056 /* get a second commit callback */
1057 req->r_unsafe_callback = ceph_sync_write_unsafe;
1058 req->r_inode = inode;
124e68e7 1059
e8344e66 1060 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
1061 false, true);
02ee07d3 1062
e8344e66 1063 /* BUG_ON(vino.snap != CEPH_NOSNAP); */
1064 ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
124e68e7 1065
e8344e66 1066 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1067 if (!ret)
1068 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1069
1070out:
e8344e66 1071 ceph_osdc_put_request(req);
1072 if (ret == 0) {
1073 pos += len;
1074 written += len;
1075
1076 if (pos > i_size_read(inode)) {
1077 check_caps = ceph_inode_set_size(inode, pos);
1078 if (check_caps)
1079 ceph_check_caps(ceph_inode(inode),
1080 CHECK_CAPS_AUTHONLY,
1081 NULL);
1082 }
1083 } else
1084 break;
1085 }
124e68e7 1086
e8344e66 1087 if (ret != -EOLDSNAPC && written > 0) {
124e68e7 1088 ret = written;
e8344e66 1089 iocb->ki_pos = pos;
1090 }
1091 return ret;
1092}
1093
1094/*
1095 * Wrap generic_file_aio_read with checks for cap bits on the inode.
1096 * Atomically grab references, so that those bits are not released
1097 * back to the MDS mid-read.
1098 *
1099 * Hmm, the sync read case isn't actually async... should it be?
1100 */
3644424d 1101static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
1102{
1103 struct file *filp = iocb->ki_filp;
2962507c 1104 struct ceph_file_info *fi = filp->private_data;
66ee59af 1105 size_t len = iov_iter_count(to);
496ad9aa 1106 struct inode *inode = file_inode(filp);
124e68e7 1107 struct ceph_inode_info *ci = ceph_inode(inode);
3738daa6 1108 struct page *pinned_page = NULL;
124e68e7 1109 ssize_t ret;
2962507c 1110 int want, got = 0;
83701246 1111 int retry_op = 0, read = 0;
124e68e7 1112
6a026589 1113again:
8eb4efb0 1114 dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
1115 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
1116
1117 if (fi->fmode & CEPH_FILE_MODE_LAZY)
1118 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1119 else
1120 want = CEPH_CAP_FILE_CACHE;
3738daa6 1121 ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
124e68e7 1122 if (ret < 0)
8eb4efb0 1123 return ret;
124e68e7 1124
2962507c 1125 if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
2ba48ce5 1126 (iocb->ki_flags & IOCB_DIRECT) ||
8eb4efb0 1127 (fi->flags & CEPH_F_SYNC)) {
8eb4efb0 1128
1129 dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1130 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1131 ceph_cap_string(got));
1132
83701246 1133 if (ci->i_inline_version == CEPH_INLINE_NONE) {
1134 if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
1135 ret = ceph_direct_read_write(iocb, to,
1136 NULL, NULL);
1137 if (ret >= 0 && ret < len)
1138 retry_op = CHECK_EOF;
1139 } else {
1140 ret = ceph_sync_read(iocb, to, &retry_op);
1141 }
1142 } else {
1143 retry_op = READ_INLINE;
1144 }
8eb4efb0 1145 } else {
8eb4efb0 1146 dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
3644424d 1147 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
8eb4efb0 1148 ceph_cap_string(got));
124e68e7 1149
3644424d 1150 ret = generic_file_read_iter(iocb, to);
8eb4efb0 1151 }
1152 dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1153 inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1154 if (pinned_page) {
1155 page_cache_release(pinned_page);
1156 pinned_page = NULL;
1157 }
124e68e7 1158 ceph_put_cap_refs(ci, got);
c8fe9b17 1159 if (retry_op > HAVE_RETRIED && ret >= 0) {
1160 int statret;
1161 struct page *page = NULL;
1162 loff_t i_size;
1163 if (retry_op == READ_INLINE) {
687265e5 1164 page = __page_cache_alloc(GFP_KERNEL);
1165 if (!page)
1166 return -ENOMEM;
1167 }
6a026589 1168
1169 statret = __ceph_do_getattr(inode, page,
1170 CEPH_STAT_CAP_INLINE_DATA, !!page);
1171 if (statret < 0) {
1172 __free_page(page);
1173 if (statret == -ENODATA) {
1174 BUG_ON(retry_op != READ_INLINE);
1175 goto again;
1176 }
1177 return statret;
1178 }
6a026589 1179
1180 i_size = i_size_read(inode);
1181 if (retry_op == READ_INLINE) {
1182 BUG_ON(ret > 0 || read > 0);
1183 if (iocb->ki_pos < i_size &&
1184 iocb->ki_pos < PAGE_CACHE_SIZE) {
1185 loff_t end = min_t(loff_t, i_size,
1186 iocb->ki_pos + len);
fcc02d2a 1187 end = min_t(loff_t, end, PAGE_CACHE_SIZE);
1188 if (statret < end)
1189 zero_user_segment(page, statret, end);
1190 ret = copy_page_to_iter(page,
1191 iocb->ki_pos & ~PAGE_MASK,
1192 end - iocb->ki_pos, to);
1193 iocb->ki_pos += ret;
1194 read += ret;
1195 }
1196 if (iocb->ki_pos < i_size && read < len) {
1197 size_t zlen = min_t(size_t, len - read,
1198 i_size - iocb->ki_pos);
1199 ret = iov_iter_zero(zlen, to);
1200 iocb->ki_pos += ret;
1201 read += ret;
1202 }
1203 __free_pages(page, 0);
fcc02d2a 1204 return read;
83701246 1205 }
1206
1207 /* hit EOF or hole? */
83701246 1208 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
fcc02d2a 1209 ret < len) {
8eb4efb0 1210 dout("sync_read hit hole, ppos %lld < size %lld"
99c88e69 1211 ", reading more\n", iocb->ki_pos, i_size);
8eb4efb0 1212
6a026589 1213 read += ret;
6a026589 1214 len -= ret;
c8fe9b17 1215 retry_op = HAVE_RETRIED;
1216 goto again;
1217 }
1218 }
8eb4efb0 1219
1220 if (ret >= 0)
1221 ret += read;
1222
1223 return ret;
1224}
1225
1226/*
1227 * Take cap references to avoid releasing caps to MDS mid-write.
1228 *
1229 * If we are synchronous, and write with an old snap context, the OSD
 1230 * may return EOLDSNAPC. In that case, retry the write _after_
1231 * dropping our cap refs and allowing the pending snap to logically
1232 * complete _before_ this write occurs.
1233 *
1234 * If we are near ENOSPC, write synchronously.
1235 */
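/*
 * The EOLDSNAPC retry described above is implemented by the retry_snap
 * label below: a fresh snap context is taken under i_ceph_lock and the
 * sync or direct write is issued again.
 */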
4908b822 1236static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1237{
1238 struct file *file = iocb->ki_filp;
33caad32 1239 struct ceph_file_info *fi = file->private_data;
496ad9aa 1240 struct inode *inode = file_inode(file);
124e68e7 1241 struct ceph_inode_info *ci = ceph_inode(inode);
1242 struct ceph_osd_client *osdc =
1243 &ceph_sb_to_client(inode->i_sb)->client->osdc;
f66fd9f0 1244 struct ceph_cap_flush *prealloc_cf;
3309dd04 1245 ssize_t count, written = 0;
03d254ed 1246 int err, want, got;
3309dd04 1247 loff_t pos;
1248
1249 if (ceph_snap(inode) != CEPH_NOSNAP)
1250 return -EROFS;
1251
1252 prealloc_cf = ceph_alloc_cap_flush();
1253 if (!prealloc_cf)
1254 return -ENOMEM;
1255
5955102c 1256 inode_lock(inode);
03d254ed 1257
03d254ed 1258 /* We can write back this queue in page reclaim */
de1414a6 1259 current->backing_dev_info = inode_to_bdi(inode);
03d254ed 1260
1261 if (iocb->ki_flags & IOCB_APPEND) {
1262 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1263 if (err < 0)
1264 goto out;
1265 }
1266
1267 err = generic_write_checks(iocb, from);
1268 if (err <= 0)
1269 goto out;
1270
1271 pos = iocb->ki_pos;
1272 count = iov_iter_count(from);
5fa8e0a1 1273 err = file_remove_privs(file);
1274 if (err)
1275 goto out;
1276
1277 err = file_update_time(file);
1278 if (err)
1279 goto out;
1280
1281 if (ci->i_inline_version != CEPH_INLINE_NONE) {
1282 err = ceph_uninline_data(file, NULL);
1283 if (err < 0)
1284 goto out;
1285 }
1286
124e68e7 1287retry_snap:
6070e0c1 1288 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
03d254ed 1289 err = -ENOSPC;
1290 goto out;
1291 }
03d254ed 1292
ac7f29bf 1293 dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
99c88e69 1294 inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1295 if (fi->fmode & CEPH_FILE_MODE_LAZY)
1296 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1297 else
1298 want = CEPH_CAP_FILE_BUFFER;
03d254ed 1299 got = 0;
1300 err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
1301 &got, NULL);
03d254ed 1302 if (err < 0)
37505d57 1303 goto out;
124e68e7 1304
ac7f29bf 1305 dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
03d254ed 1306 inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1307
1308 if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
2ba48ce5 1309 (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
5dda377c 1310 struct ceph_snap_context *snapc;
4908b822 1311 struct iov_iter data;
5955102c 1312 inode_unlock(inode);
1313
1314 spin_lock(&ci->i_ceph_lock);
1315 if (__ceph_have_pending_cap_snap(ci)) {
1316 struct ceph_cap_snap *capsnap =
1317 list_last_entry(&ci->i_cap_snaps,
1318 struct ceph_cap_snap,
1319 ci_item);
1320 snapc = ceph_get_snap_context(capsnap->context);
1321 } else {
1322 BUG_ON(!ci->i_head_snapc);
1323 snapc = ceph_get_snap_context(ci->i_head_snapc);
1324 }
1325 spin_unlock(&ci->i_ceph_lock);
1326
 1327 /* we might need to revert to that point */
1328 data = *from;
2ba48ce5 1329 if (iocb->ki_flags & IOCB_DIRECT)
1330 written = ceph_direct_read_write(iocb, &data, snapc,
1331 &prealloc_cf);
e8344e66 1332 else
5dda377c 1333 written = ceph_sync_write(iocb, &data, pos, snapc);
0e5dd45c 1334 if (written == -EOLDSNAPC) {
1335 dout("aio_write %p %llx.%llx %llu~%u"
1336 "got EOLDSNAPC, retrying\n",
1337 inode, ceph_vinop(inode),
4908b822 1338 pos, (unsigned)count);
5955102c 1339 inode_lock(inode);
0e5dd45c 1340 goto retry_snap;
1341 }
1342 if (written > 0)
1343 iov_iter_advance(from, written);
5dda377c 1344 ceph_put_snap_context(snapc);
7971bd92 1345 } else {
99c88e69 1346 loff_t old_size = i_size_read(inode);
 1347 /*
 1348 * No need to acquire the i_truncate_mutex, because the MDS
 1349 * revokes Fwb caps before sending a truncate message to us.
 1350 * We cannot get the Fwb cap while there is a pending
 1351 * vmtruncate, so write and vmtruncate cannot run at the
 1352 * same time.
 1353 */
4908b822 1354 written = generic_perform_write(file, from, pos);
1355 if (likely(written >= 0))
1356 iocb->ki_pos = pos + written;
99c88e69 1357 if (i_size_read(inode) > old_size)
32d3e148 1358 ceph_fscache_update_objectsize(inode);
5955102c 1359 inode_unlock(inode);
7971bd92 1360 }
d8de9ab6 1361
03d254ed 1362 if (written >= 0) {
fca65b4a 1363 int dirty;
be655596 1364 spin_lock(&ci->i_ceph_lock);
28127bdd 1365 ci->i_inline_version = CEPH_INLINE_NONE;
1366 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1367 &prealloc_cf);
be655596 1368 spin_unlock(&ci->i_ceph_lock);
1369 if (dirty)
1370 __mark_inode_dirty(inode, dirty);
124e68e7 1371 }
7971bd92 1372
124e68e7 1373 dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
4908b822 1374 inode, ceph_vinop(inode), pos, (unsigned)count,
7971bd92 1375 ceph_cap_string(got));
124e68e7 1376 ceph_put_cap_refs(ci, got);
7971bd92 1377
03d254ed 1378 if (written >= 0 &&
1379 ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) ||
1380 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
03d254ed 1381 err = vfs_fsync_range(file, pos, pos + written - 1, 1);
6070e0c1 1382 if (err < 0)
03d254ed 1383 written = err;
6070e0c1 1384 }
03d254ed 1385
1386 goto out_unlocked;
1387
03d254ed 1388out:
5955102c 1389 inode_unlock(inode);
2f75e9e1 1390out_unlocked:
f66fd9f0 1391 ceph_free_cap_flush(prealloc_cf);
03d254ed 1392 current->backing_dev_info = NULL;
03d254ed 1393 return written ? written : err;
1394}
1395
1396/*
1397 * llseek. be sure to verify file size on SEEK_END.
1398 */
965c8e59 1399static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1400{
1401 struct inode *inode = file->f_mapping->host;
99c88e69 1402 loff_t i_size;
1403 int ret;
1404
5955102c 1405 inode_lock(inode);
6a82c47a 1406
965c8e59 1407 if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
508b32d8 1408 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1409 if (ret < 0) {
1410 offset = ret;
1411 goto out;
1412 }
1413 }
1414
99c88e69 1415 i_size = i_size_read(inode);
965c8e59 1416 switch (whence) {
06222e49 1417 case SEEK_END:
99c88e69 1418 offset += i_size;
1419 break;
1420 case SEEK_CUR:
1421 /*
1422 * Here we special-case the lseek(fd, 0, SEEK_CUR)
1423 * position-querying operation. Avoid rewriting the "same"
1424 * f_pos value back to the file because a concurrent read(),
1425 * write() or lseek() might have altered it
1426 */
1427 if (offset == 0) {
1428 offset = file->f_pos;
1429 goto out;
1430 }
1431 offset += file->f_pos;
1432 break;
06222e49 1433 case SEEK_DATA:
99c88e69 1434 if (offset >= i_size) {
1435 ret = -ENXIO;
1436 goto out;
1437 }
1438 break;
1439 case SEEK_HOLE:
99c88e69 1440 if (offset >= i_size) {
1441 ret = -ENXIO;
1442 goto out;
1443 }
99c88e69 1444 offset = i_size;
06222e49 1445 break;
1446 }
1447
46a1c2c7 1448 offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
1449
1450out:
5955102c 1451 inode_unlock(inode);
1452 return offset;
1453}
1454
1455static inline void ceph_zero_partial_page(
1456 struct inode *inode, loff_t offset, unsigned size)
1457{
1458 struct page *page;
1459 pgoff_t index = offset >> PAGE_CACHE_SHIFT;
1460
1461 page = find_lock_page(inode->i_mapping, index);
1462 if (page) {
1463 wait_on_page_writeback(page);
1464 zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size);
1465 unlock_page(page);
1466 page_cache_release(page);
1467 }
1468}
1469
1470static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1471 loff_t length)
1472{
1473 loff_t nearly = round_up(offset, PAGE_CACHE_SIZE);
1474 if (offset < nearly) {
1475 loff_t size = nearly - offset;
1476 if (length < size)
1477 size = length;
1478 ceph_zero_partial_page(inode, offset, size);
1479 offset += size;
1480 length -= size;
1481 }
1482 if (length >= PAGE_CACHE_SIZE) {
1483 loff_t size = round_down(length, PAGE_CACHE_SIZE);
1484 truncate_pagecache_range(inode, offset, offset + size - 1);
1485 offset += size;
1486 length -= size;
1487 }
1488 if (length)
1489 ceph_zero_partial_page(inode, offset, length);
1490}
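/*
 * Worked example (assuming a 4 KB page size): for offset 1000 and
 * length 10000, the partial head [1000, 4096) is zeroed in place, the
 * fully covered page range [4096, 8192) is dropped with
 * truncate_pagecache_range(), and the partial tail [8192, 11000) is
 * zeroed in place.
 */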
1491
1492static int ceph_zero_partial_object(struct inode *inode,
1493 loff_t offset, loff_t *length)
1494{
1495 struct ceph_inode_info *ci = ceph_inode(inode);
1496 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1497 struct ceph_osd_request *req;
1498 int ret = 0;
1499 loff_t zero = 0;
1500 int op;
1501
1502 if (!length) {
1503 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
1504 length = &zero;
1505 } else {
1506 op = CEPH_OSD_OP_ZERO;
1507 }
1508
1509 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1510 ceph_vino(inode),
1511 offset, length,
715e4cd4 1512 0, 1, op,
1513 CEPH_OSD_FLAG_WRITE |
1514 CEPH_OSD_FLAG_ONDISK,
1515 NULL, 0, 0, false);
1516 if (IS_ERR(req)) {
1517 ret = PTR_ERR(req);
1518 goto out;
1519 }
1520
1521 ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap,
1522 &inode->i_mtime);
1523
1524 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1525 if (!ret) {
1526 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1527 if (ret == -ENOENT)
1528 ret = 0;
1529 }
1530 ceph_osdc_put_request(req);
1531
1532out:
1533 return ret;
1534}
1535
1536static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
1537{
1538 int ret = 0;
1539 struct ceph_inode_info *ci = ceph_inode(inode);
1540 s32 stripe_unit = ceph_file_layout_su(ci->i_layout);
1541 s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
1542 s32 object_size = ceph_file_layout_object_size(ci->i_layout);
1543 u64 object_set_size = object_size * stripe_count;
1544 u64 nearly, t;
1545
1546 /* round offset up to next period boundary */
1547 nearly = offset + object_set_size - 1;
1548 t = nearly;
1549 nearly -= do_div(t, object_set_size);
ad7a60de 1550
1551 while (length && offset < nearly) {
1552 loff_t size = length;
1553 ret = ceph_zero_partial_object(inode, offset, &size);
1554 if (ret < 0)
1555 return ret;
1556 offset += size;
1557 length -= size;
1558 }
1559 while (length >= object_set_size) {
1560 int i;
1561 loff_t pos = offset;
1562 for (i = 0; i < stripe_count; ++i) {
1563 ret = ceph_zero_partial_object(inode, pos, NULL);
1564 if (ret < 0)
1565 return ret;
1566 pos += stripe_unit;
1567 }
1568 offset += object_set_size;
1569 length -= object_set_size;
1570 }
1571 while (length) {
1572 loff_t size = length;
1573 ret = ceph_zero_partial_object(inode, offset, &size);
1574 if (ret < 0)
1575 return ret;
1576 offset += size;
1577 length -= size;
1578 }
1579 return ret;
1580}
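/*
 * The hole is punched in three phases: partial objects up to the next
 * period boundary (offset rounded up to a multiple of object_set_size),
 * then whole object sets one stripe object at a time, then the
 * remaining tail.
 */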
1581
1582static long ceph_fallocate(struct file *file, int mode,
1583 loff_t offset, loff_t length)
1584{
1585 struct ceph_file_info *fi = file->private_data;
aa8b60e0 1586 struct inode *inode = file_inode(file);
1587 struct ceph_inode_info *ci = ceph_inode(inode);
1588 struct ceph_osd_client *osdc =
1589 &ceph_inode_to_client(inode)->client->osdc;
f66fd9f0 1590 struct ceph_cap_flush *prealloc_cf;
1591 int want, got = 0;
1592 int dirty;
1593 int ret = 0;
1594 loff_t endoff = 0;
1595 loff_t size;
1596
1597 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
1598 return -EOPNOTSUPP;
1599
1600 if (!S_ISREG(inode->i_mode))
1601 return -EOPNOTSUPP;
1602
1603 prealloc_cf = ceph_alloc_cap_flush();
1604 if (!prealloc_cf)
1605 return -ENOMEM;
1606
5955102c 1607 inode_lock(inode);
1608
1609 if (ceph_snap(inode) != CEPH_NOSNAP) {
1610 ret = -EROFS;
1611 goto unlock;
1612 }
1613
1614 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
1615 !(mode & FALLOC_FL_PUNCH_HOLE)) {
1616 ret = -ENOSPC;
1617 goto unlock;
1618 }
1619
1620 if (ci->i_inline_version != CEPH_INLINE_NONE) {
1621 ret = ceph_uninline_data(file, NULL);
1622 if (ret < 0)
1623 goto unlock;
1624 }
1625
1626 size = i_size_read(inode);
1627 if (!(mode & FALLOC_FL_KEEP_SIZE))
1628 endoff = offset + length;
1629
1630 if (fi->fmode & CEPH_FILE_MODE_LAZY)
1631 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1632 else
1633 want = CEPH_CAP_FILE_BUFFER;
1634
3738daa6 1635 ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
1636 if (ret < 0)
1637 goto unlock;
1638
1639 if (mode & FALLOC_FL_PUNCH_HOLE) {
1640 if (offset < size)
1641 ceph_zero_pagecache_range(inode, offset, length);
1642 ret = ceph_zero_objects(inode, offset, length);
1643 } else if (endoff > size) {
1644 truncate_pagecache_range(inode, size, -1);
1645 if (ceph_inode_set_size(inode, endoff))
1646 ceph_check_caps(ceph_inode(inode),
1647 CHECK_CAPS_AUTHONLY, NULL);
1648 }
1649
1650 if (!ret) {
1651 spin_lock(&ci->i_ceph_lock);
28127bdd 1652 ci->i_inline_version = CEPH_INLINE_NONE;
1653 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1654 &prealloc_cf);
1655 spin_unlock(&ci->i_ceph_lock);
1656 if (dirty)
1657 __mark_inode_dirty(inode, dirty);
1658 }
1659
1660 ceph_put_cap_refs(ci, got);
1661unlock:
5955102c 1662 inode_unlock(inode);
f66fd9f0 1663 ceph_free_cap_flush(prealloc_cf);
1664 return ret;
1665}
1666
1667const struct file_operations ceph_file_fops = {
1668 .open = ceph_open,
1669 .release = ceph_release,
1670 .llseek = ceph_llseek,
3644424d 1671 .read_iter = ceph_read_iter,
4908b822 1672 .write_iter = ceph_write_iter,
1673 .mmap = ceph_mmap,
1674 .fsync = ceph_fsync,
1675 .lock = ceph_lock,
1676 .flock = ceph_flock,
124e68e7 1677 .splice_read = generic_file_splice_read,
3551dd79 1678 .splice_write = iter_file_splice_write,
1679 .unlocked_ioctl = ceph_ioctl,
1680 .compat_ioctl = ceph_ioctl,
ad7a60de 1681 .fallocate = ceph_fallocate,
1682};
1683