drivers/staging/lustre/lustre/llite/vvp_io.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for VVP layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/obd.h"
#include "../include/lustre_lite.h"

#include "vvp_internal.h"

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
				const struct cl_io_slice *slice);

/**
 * True, if \a io is a normal io, False for splice_{read,write}
 */
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
{
	struct vvp_io *vio = vvp_env_io(env);

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	return vio->cui_io_subtype == IO_NORMAL;
}

/**
 * For swapping layout. The file's layout may have changed.
 * To avoid populating pages to a wrong stripe, we have to verify the
 * correctness of layout. It works because swapping layout processes
 * have to acquire group lock.
 */
static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
			       struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ccc_io *cio = ccc_env_io(env);
	bool rc = true;

	switch (io->ci_type) {
	case CIT_READ:
	case CIT_WRITE:
		/* no lock is needed here to check lli_layout_gen, as the
		 * extent lock is held and the GROUP lock has to be held to
		 * swap the layout */
		if (ll_layout_version_get(lli) != cio->cui_layout_gen) {
			io->ci_need_restart = 1;
			/* this will return application a short read/write */
			io->ci_continue = 0;
			rc = false;
		}
		/* fall through */
	case CIT_FAULT:
		/* fault is okay because we've already had a page. */
	default:
		break;
	}

	return rc;
}

/*****************************************************************************
 *
 * io operations.
 *
 */

static int vvp_io_fault_iter_init(const struct lu_env *env,
				  const struct cl_io_slice *ios)
{
	struct vvp_io *vio = cl2vvp_io(env, ios);
	struct inode *inode = ccc_object_inode(ios->cis_obj);

	LASSERT(inode ==
		cl2ccc_io(env, ios)->cui_fd->fd_file->f_dentry->d_inode);
	vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime);
	return 0;
}

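/*
 * Generic per-io cleanup: if the file turned out to be HSM-released, register
 * a layout restore; if the layout must be verified, refresh it and flag the
 * io for restart when the layout generation has changed.
 */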
static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = io->ci_obj;
	struct ccc_io *cio = cl2ccc_io(env, ios);

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, DFID
	       " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
	       PFID(lu_object_fid(&obj->co_lu)),
	       io->ci_ignore_layout, io->ci_verify_layout,
	       cio->cui_layout_gen, io->ci_restore_needed);

	if (io->ci_restore_needed == 1) {
		int rc;

		/* the file was detected as released, restore it before
		 * finishing the io
		 */
		rc = ll_layout_restore(ccc_object_inode(obj));
		/* if restore registration failed, no restart,
		 * we will return -ENODATA */
		/* The layout will change after restore, so we need to
		 * block on the layout lock held by the MDT.
		 * As the MDT will not send the new layout in the lvb
		 * (see LU-3124), we have to fetch it explicitly; all of
		 * this is done by ll_layout_refresh().
		 */
		if (rc == 0) {
			io->ci_restore_needed = 0;
			io->ci_need_restart = 1;
			io->ci_verify_layout = 1;
		} else {
			io->ci_restore_needed = 1;
			io->ci_need_restart = 0;
			io->ci_verify_layout = 0;
			io->ci_result = rc;
		}
	}

	if (!io->ci_ignore_layout && io->ci_verify_layout) {
		__u32 gen = 0;

		/* check layout version */
		ll_layout_refresh(ccc_object_inode(obj), &gen);
		io->ci_need_restart = cio->cui_layout_gen != gen;
		if (io->ci_need_restart) {
			CDEBUG(D_VFSTRACE,
			       DFID" layout changed from %d to %d.\n",
			       PFID(lu_object_fid(&obj->co_lu)),
			       cio->cui_layout_gen, gen);
			/* today a successful restore is the only possible
			 * case */
			/* restore was done, clear restoring state */
			ll_i2info(ccc_object_inode(obj))->lli_flags &=
				~LLIF_FILE_RESTORING;
		}
	}
}

static void vvp_io_fault_fini(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct cl_page *page = io->u.ci_fault.ft_page;

	CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));

	if (page != NULL) {
		lu_ref_del(&page->cp_reference, "fault", io);
		cl_page_put(env, page);
		io->u.ci_fault.ft_page = NULL;
	}
	vvp_io_fini(env, ios);
}

static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
{
	/*
	 * we only want to hold PW locks if the mmap() can generate
	 * writes back to the file and that only happens in shared
	 * writable vmas
	 */
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
		return CLM_WRITE;
	return CLM_READ;
}

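/*
 * Walk the iovecs of this io; for every user buffer that is itself an
 * mmapped region of a Lustre file (as found by our_vma()), record a cl_lock
 * descriptor covering the mapped range so that page faults taken while
 * copying data are already covered. For nolock files CEF_NEVER is used,
 * producing a lockless lock.
 */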
static int vvp_mmap_locks(const struct lu_env *env,
			  struct ccc_io *vio, struct cl_io *io)
{
	struct ccc_thread_info *cti = ccc_env_info(env);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct cl_lock_descr *descr = &cti->cti_descr;
	ldlm_policy_data_t policy;
	unsigned long addr;
	ssize_t count;
	int result;
	struct iov_iter i;
	struct iovec iov;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	if (!cl_is_normalio(env, io))
		return 0;

	if (vio->cui_iter == NULL) /* nfs or loop back device write */
		return 0;

	/* No MM (e.g. NFS)? No vmas too. */
	if (mm == NULL)
		return 0;

	iov_for_each(iov, i, *(vio->cui_iter)) {
		addr = (unsigned long)iov.iov_base;
		count = iov.iov_len;
		if (count == 0)
			continue;

		count += addr & (~CFS_PAGE_MASK);
		addr &= CFS_PAGE_MASK;

		down_read(&mm->mmap_sem);
		while ((vma = our_vma(mm, addr, count)) != NULL) {
			struct inode *inode = vma->vm_file->f_dentry->d_inode;
			int flags = CEF_MUST;

			if (ll_file_nolock(vma->vm_file)) {
				/*
				 * For no lock case, a lockless lock will be
				 * generated.
				 */
				flags = CEF_NEVER;
			}

			/*
			 * XXX: Required lock mode can be weakened: CIT_WRITE
			 * io only ever reads user level buffer, and CIT_READ
			 * only writes on it.
			 */
			policy_from_vma(&policy, vma, addr, count);
			descr->cld_mode = vvp_mode_from_vma(vma);
			descr->cld_obj = ll_i2info(inode)->lli_clob;
			descr->cld_start = cl_index(descr->cld_obj,
						    policy.l_extent.start);
			descr->cld_end = cl_index(descr->cld_obj,
						  policy.l_extent.end);
			descr->cld_enq_flags = flags;
			result = cl_io_lock_alloc_add(env, io, descr);

			CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
			       descr->cld_mode, descr->cld_start,
			       descr->cld_end);

			if (result < 0) {
				up_read(&mm->mmap_sem);
				return result;
			}

			if (vma->vm_end - addr >= count)
				break;

			count -= vma->vm_end - addr;
			addr = vma->vm_end;
		}
		up_read(&mm->mmap_sem);
	}
	return 0;
}

static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
			  enum cl_lock_mode mode, loff_t start, loff_t end)
{
	struct ccc_io *cio = ccc_env_io(env);
	int result;
	int ast_flags = 0;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	ccc_io_update_iov(env, cio, io);

	if (io->u.ci_rw.crw_nonblock)
		ast_flags |= CEF_NONBLOCK;
	result = vvp_mmap_locks(env, cio, io);
	if (result == 0)
		result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
	return result;
}

static int vvp_io_read_lock(const struct lu_env *env,
			    const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct ll_inode_info *lli = ll_i2info(ccc_object_inode(io->ci_obj));
	int result;

	/* XXX: Layer violation, we shouldn't see lsm at llite level. */
	if (lli->lli_has_smd) /* lsm-less file doesn't need to lock */
		result = vvp_io_rw_lock(env, io, CLM_READ,
					io->u.ci_rd.rd.crw_pos,
					io->u.ci_rd.rd.crw_pos +
					io->u.ci_rd.rd.crw_count - 1);
	else
		result = 0;
	return result;
}

static int vvp_io_fault_lock(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct vvp_io *vio = cl2vvp_io(env, ios);
	/*
	 * XXX LDLM_FL_CBPENDING
	 */
	return ccc_io_one_lock_index
		(env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma),
		 io->u.ci_fault.ft_index, io->u.ci_fault.ft_index);
}

static int vvp_io_write_lock(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	loff_t start;
	loff_t end;

	if (io->u.ci_wr.wr_append) {
		start = 0;
		end = OBD_OBJECT_EOF;
	} else {
		start = io->u.ci_wr.wr.crw_pos;
		end = start + io->u.ci_wr.wr.crw_count - 1;
	}
	return vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
}

static int vvp_io_setattr_iter_init(const struct lu_env *env,
				    const struct cl_io_slice *ios)
{
	return 0;
}

/**
 * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
 *
 * Handles "lockless io" mode when extent locking is done by the server.
 */
static int vvp_io_setattr_lock(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct ccc_io *cio = ccc_env_io(env);
	struct cl_io *io = ios->cis_io;
	__u64 new_size;
	__u32 enqflags = 0;

	if (cl_io_is_trunc(io)) {
		new_size = io->u.ci_setattr.sa_attr.lvb_size;
		if (new_size == 0)
			enqflags = CEF_DISCARD_DATA;
	} else {
		if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
		     io->u.ci_setattr.sa_attr.lvb_ctime) ||
		    (io->u.ci_setattr.sa_attr.lvb_atime >=
		     io->u.ci_setattr.sa_attr.lvb_ctime))
			return 0;
		new_size = 0;
	}
	cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
	return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
			       new_size, OBD_OBJECT_EOF);
}

static int vvp_do_vmtruncate(struct inode *inode, size_t size)
{
	int result;
	/*
	 * Only ll_inode_size_lock is taken at this level.
	 */
	ll_inode_size_lock(inode);
	result = inode_newsize_ok(inode, size);
	if (result < 0) {
		ll_inode_size_unlock(inode);
		return result;
	}
	truncate_setsize(inode, size);
	ll_inode_size_unlock(inode);
	return result;
}

static int vvp_io_setattr_trunc(const struct lu_env *env,
				const struct cl_io_slice *ios,
				struct inode *inode, loff_t size)
{
	inode_dio_wait(inode);
	return 0;
}

static int vvp_io_setattr_time(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = io->ci_obj;
	struct cl_attr *attr = ccc_env_thread_attr(env);
	int result;
	unsigned valid = CAT_CTIME;

	cl_object_attr_lock(obj);
	attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
	if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) {
		attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
		valid |= CAT_ATIME;
	}
	if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) {
		attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
		valid |= CAT_MTIME;
	}
	result = cl_object_attr_set(env, obj, attr, valid);
	cl_object_attr_unlock(obj);

	return result;
}

static int vvp_io_setattr_start(const struct lu_env *env,
				const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct inode *inode = ccc_object_inode(io->ci_obj);
	int result = 0;

	mutex_lock(&inode->i_mutex);
	if (cl_io_is_trunc(io))
		result = vvp_io_setattr_trunc(env, ios, inode,
					      io->u.ci_setattr.sa_attr.lvb_size);
	if (result == 0)
		result = vvp_io_setattr_time(env, ios);
	return result;
}

static void vvp_io_setattr_end(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct inode *inode = ccc_object_inode(io->ci_obj);

	if (cl_io_is_trunc(io)) {
		/* Truncate in memory pages - they must be clean pages
		 * because osc has already notified to destroy osc_extents. */
		vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
		inode_dio_write_done(inode);
	}
	mutex_unlock(&inode->i_mutex);
}

static void vvp_io_setattr_fini(const struct lu_env *env,
				const struct cl_io_slice *ios)
{
	vvp_io_fini(env, ios);
}

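/*
 * Start a CIT_READ io: set up the llite read-ahead window (the kernel's own
 * read-ahead is disabled), hand the transfer to generic_file_read_iter() for
 * normal io or generic_file_splice_read() for splice, then account the bytes
 * read in the per-file stats.
 */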
static int vvp_io_read_start(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct vvp_io *vio = cl2vvp_io(env, ios);
	struct ccc_io *cio = cl2ccc_io(env, ios);
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = io->ci_obj;
	struct inode *inode = ccc_object_inode(obj);
	struct ll_ra_read *bead = &vio->cui_bead;
	struct file *file = cio->cui_fd->fd_file;

	int result;
	loff_t pos = io->u.ci_rd.rd.crw_pos;
	long cnt = io->u.ci_rd.rd.crw_count;
	long tot = cio->cui_tot_count;
	int exceed = 0;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);

	if (!can_populate_pages(env, io, inode))
		return 0;

	result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
	if (result != 0)
		return result;
	else if (exceed != 0)
		goto out;

	LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
			 "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
			 inode->i_ino, cnt, pos, i_size_read(inode));

	/* turn off the kernel's read-ahead */
	cio->cui_fd->fd_file->f_ra.ra_pages = 0;

	/* initialize read-ahead window once per syscall */
	if (!vio->cui_ra_window_set) {
		vio->cui_ra_window_set = 1;
		bead->lrr_start = cl_index(obj, pos);
		/*
		 * XXX: explicit PAGE_CACHE_SIZE
		 */
		bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
		ll_ra_read_in(file, bead);
	}

	/* BUG: 5972 */
	file_accessed(file);
	switch (vio->cui_io_subtype) {
	case IO_NORMAL:
		LASSERT(cio->cui_iocb->ki_pos == pos);
		result = generic_file_read_iter(cio->cui_iocb, cio->cui_iter);
		break;
	case IO_SPLICE:
		result = generic_file_splice_read(file, &pos,
						  vio->u.splice.cui_pipe, cnt,
						  vio->u.splice.cui_flags);
		/* LU-1109: do splice read stripe by stripe, otherwise it may
		 * make nfsd stuck if this read occupies all internal pipe
		 * buffers. */
		io->ci_continue = 0;
		break;
	default:
		CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
		LBUG();
	}

out:
	if (result >= 0) {
		if (result < cnt)
			io->ci_continue = 0;
		io->ci_nob += result;
		ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
				  cio->cui_fd, pos, result, READ);
		result = 0;
	}
	return result;
}

static void vvp_io_read_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct vvp_io *vio = cl2vvp_io(env, ios);
	struct ccc_io *cio = cl2ccc_io(env, ios);

	if (vio->cui_ra_window_set)
		ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);

	vvp_io_fini(env, ios);
}

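/*
 * Start a CIT_WRITE io: for append writes reset the position to the current
 * i_size, then let generic_file_write_iter() do the copy and update
 * io->ci_nob and the per-file write stats with the bytes written.
 */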
static int vvp_io_write_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct ccc_io *cio = cl2ccc_io(env, ios);
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = io->ci_obj;
	struct inode *inode = ccc_object_inode(obj);
	ssize_t result = 0;
	loff_t pos = io->u.ci_wr.wr.crw_pos;
	size_t cnt = io->u.ci_wr.wr.crw_count;

	if (!can_populate_pages(env, io, inode))
		return 0;

	if (cl_io_is_append(io)) {
		/*
		 * PARALLEL IO This has to be changed for parallel IO doing
		 * out-of-order writes.
		 */
		pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
		cio->cui_iocb->ki_pos = pos;
	} else {
		LASSERT(cio->cui_iocb->ki_pos == pos);
	}

	CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);

	if (cio->cui_iter == NULL) /* from a temp io in ll_cl_init(). */
		result = 0;
	else
		result = generic_file_write_iter(cio->cui_iocb, cio->cui_iter);

	if (result > 0) {
		if (result < cnt)
			io->ci_continue = 0;
		io->ci_nob += result;
		ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
				  cio->cui_fd, pos, result, WRITE);
		result = 0;
	}
	return result;
}

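/*
 * Ask the kernel to service the fault via filemap_fault() and translate the
 * resulting VM_FAULT_* flags into an errno; on success the faulted page is
 * returned locked in cfio->ft_vmpage.
 */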
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
	struct vm_fault *vmf = cfio->fault.ft_vmf;

	cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
	cfio->fault.ft_flags_valid = 1;

	if (vmf->page) {
		CDEBUG(D_PAGE,
		       "page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n",
		       vmf->page, vmf->page->mapping, vmf->page->index,
		       (long)vmf->page->flags, page_count(vmf->page),
		       page_private(vmf->page), vmf->virtual_address);
		if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
			lock_page(vmf->page);
			cfio->fault.ft_flags |= VM_FAULT_LOCKED;
		}

		cfio->ft_vmpage = vmf->page;
		return 0;
	}

	if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
		CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
		return -EFAULT;
	}

	if (cfio->fault.ft_flags & VM_FAULT_OOM) {
		CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
		return -ENOMEM;
	}

	if (cfio->fault.ft_flags & VM_FAULT_RETRY)
		return -EAGAIN;

	CERROR("Unknown error in page fault %d!\n", cfio->fault.ft_flags);
	return -EINVAL;
}

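/*
 * Start a CIT_FAULT io: obtain the locked vmpage (either handed in by mkwrite
 * or via vvp_io_kernel_fault()), check for races with a concurrent truncate,
 * and attach the corresponding cl_page; for mkwrite the page is also dirtied
 * and queued for write-out. The vmpage is returned unlocked to avoid
 * deadlocks.
 */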
static int vvp_io_fault_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct vvp_io *vio = cl2vvp_io(env, ios);
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = io->ci_obj;
	struct inode *inode = ccc_object_inode(obj);
	struct cl_fault_io *fio = &io->u.ci_fault;
	struct vvp_fault_io *cfio = &vio->u.fault;
	loff_t offset;
	int result = 0;
	struct page *vmpage = NULL;
	struct cl_page *page;
	loff_t size;
	pgoff_t last; /* last page in a file data region */

	if (fio->ft_executable &&
	    LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
		CWARN("binary "DFID
		      " changed while waiting for the page fault lock\n",
		      PFID(lu_object_fid(&obj->co_lu)));

	/* offset of the last byte on the page */
	offset = cl_offset(obj, fio->ft_index + 1) - 1;
	LASSERT(cl_index(obj, offset) == fio->ft_index);
	result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
	if (result != 0)
		return result;

	/* must return locked page */
	if (fio->ft_mkwrite) {
		LASSERT(cfio->ft_vmpage != NULL);
		lock_page(cfio->ft_vmpage);
	} else {
		result = vvp_io_kernel_fault(cfio);
		if (result != 0)
			return result;
	}

	vmpage = cfio->ft_vmpage;
	LASSERT(PageLocked(vmpage));

	if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
		ll_invalidate_page(vmpage);

	size = i_size_read(inode);
	/* Though we have already held a cl_lock upon this page, it
	 * still can be truncated locally. */
	if (unlikely((vmpage->mapping != inode->i_mapping) ||
		     (page_offset(vmpage) > size))) {
		CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");

		/* return +1 to stop cl_io_loop() and ll_fault() will catch
		 * and retry. */
		result = +1;
		goto out;
	}

	if (fio->ft_mkwrite) {
		pgoff_t last_index;
		/*
		 * Capture the size while holding the lli_trunc_sem from
		 * above; we want to make sure that we complete the mkwrite
		 * action while holding this lock. We need to make sure that
		 * we are not past the end of the file.
		 */
		last_index = cl_index(obj, size - 1);
		if (last_index < fio->ft_index) {
			CDEBUG(D_PAGE,
			       "llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n",
			       vmpage->mapping, fio->ft_index, last_index);
			/*
			 * We need to return if we are
			 * past the end of the file. This will propagate
			 * up the call stack to ll_page_mkwrite where
			 * we will return VM_FAULT_NOPAGE. Any non-negative
			 * value returned here will be silently
			 * converted to 0. If the vmpage->mapping is null
			 * the error code would be converted back to ENODATA
			 * in ll_page_mkwrite0. Thus we return -ENODATA
			 * to handle both cases
			 */
			result = -ENODATA;
			goto out;
		}
	}

	page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
	if (IS_ERR(page)) {
		result = PTR_ERR(page);
		goto out;
	}

	/* if page is going to be written, we should add this page into cache
	 * earlier. */
	if (fio->ft_mkwrite) {
		wait_on_page_writeback(vmpage);
		if (set_page_dirty(vmpage)) {
			struct ccc_page *cp;

			/* vvp_page_assume() calls wait_on_page_writeback(). */
			cl_page_assume(env, io, page);

			cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
			vvp_write_pending(cl2ccc(obj), cp);

			/* Do not set Dirty bit here so that in case IO is
			 * started before the page is really made dirty, we
			 * still have chance to detect it. */
			result = cl_page_cache_add(env, io, page, CRT_WRITE);
			LASSERT(cl_page_is_owned(page, io));

			vmpage = NULL;
			if (result < 0) {
				cl_page_unmap(env, io, page);
				cl_page_discard(env, io, page);
				cl_page_disown(env, io, page);

				cl_page_put(env, page);

				/* we're in big trouble, what can we do now? */
				if (result == -EDQUOT)
					result = -ENOSPC;
				goto out;
			} else
				cl_page_disown(env, io, page);
		}
	}

	last = cl_index(obj, size - 1);
	/*
	 * The ft_index is only used in the case of
	 * a mkwrite action. We need to check
	 * our assertions are correct, since
	 * we should have caught this above
	 */
	LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
	if (fio->ft_index == last)
		/*
		 * Last page is mapped partially.
		 */
		fio->ft_nob = size - cl_offset(obj, fio->ft_index);
	else
		fio->ft_nob = cl_page_size(obj);

	lu_ref_add(&page->cp_reference, "fault", io);
	fio->ft_page = page;

out:
	/* return unlocked vmpage to avoid deadlocking */
	if (vmpage != NULL)
		unlock_page(vmpage);
	cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
	return result;
}

static int vvp_io_fsync_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	/* we should mark TOWRITE bit to each dirty page in radix tree to
	 * verify pages have been written, but this is difficult because of
	 * race. */
	return 0;
}

static int vvp_io_read_page(const struct lu_env *env,
			    const struct cl_io_slice *ios,
			    const struct cl_page_slice *slice)
{
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = slice->cpl_obj;
	struct ccc_page *cp = cl2ccc_page(slice);
	struct cl_page *page = slice->cpl_page;
	struct inode *inode = ccc_object_inode(obj);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ll_file_data *fd = cl2ccc_io(env, ios)->cui_fd;
	struct ll_readahead_state *ras = &fd->fd_ras;
	struct page *vmpage = cp->cpg_page;
	struct cl_2queue *queue = &io->ci_queue;
	int rc;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));
	LASSERT(slice->cpl_obj == obj);

	if (sbi->ll_ra_info.ra_max_pages_per_file &&
	    sbi->ll_ra_info.ra_max_pages)
		ras_update(sbi, inode, ras, page->cp_index,
			   cp->cpg_defer_uptodate);

	/* Sanity check whether the page is protected by a lock. */
	rc = cl_page_is_under_lock(env, io, page);
	if (rc != -EBUSY) {
		CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
			       rc == -ENODATA ? "without a lock" :
			       "match failed", rc);
		if (rc != -ENODATA)
			return rc;
	}

	if (cp->cpg_defer_uptodate) {
		cp->cpg_ra_used = 1;
		cl_page_export(env, page, 1);
	}
	/*
	 * Add page into the queue even when it is marked uptodate above.
	 * this will unlock it automatically as part of cl_page_list_disown().
	 */
	cl_2queue_add(queue, page);
	if (sbi->ll_ra_info.ra_max_pages_per_file &&
	    sbi->ll_ra_info.ra_max_pages)
		ll_readahead(env, io, ras,
			     vmpage->mapping, &queue->c2_qin, fd->fd_flags);

	return 0;
}

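/*
 * Submit a single page synchronously as a read or write. The page is put on
 * a temporary 2-queue; for reads the queue is disowned afterwards, while for
 * writes the page is left locked even on error.
 */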
static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
			    struct cl_page *page, struct ccc_page *cp,
			    enum cl_req_type crt)
{
	struct cl_2queue *queue;
	int result;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	queue = &io->ci_queue;
	cl_2queue_init_page(queue, page);

	result = cl_io_submit_sync(env, io, crt, queue, 0);
	LASSERT(cl_page_is_owned(page, io));

	if (crt == CRT_READ)
		/*
		 * in CRT_WRITE case page is left locked even in case of
		 * error.
		 */
		cl_page_list_disown(env, io, &queue->c2_qin);
	cl_2queue_fini(env, queue);

	return result;
}

/**
 * Prepare partially written-to page for a write.
 */
static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
				  struct cl_object *obj, struct cl_page *pg,
				  struct ccc_page *cp,
				  unsigned from, unsigned to)
{
	struct cl_attr *attr = ccc_env_thread_attr(env);
	loff_t offset = cl_offset(obj, pg->cp_index);
	int result;

	cl_object_attr_lock(obj);
	result = cl_object_attr_get(env, obj, attr);
	cl_object_attr_unlock(obj);
	if (result == 0) {
		/*
		 * If we are writing to a new page, no need to read old data.
		 * The extent locking will have updated the KMS, and for our
		 * purposes here we can treat it like i_size.
		 */
		if (attr->cat_kms <= offset) {
			char *kaddr = kmap_atomic(cp->cpg_page);

			memset(kaddr, 0, cl_page_size(obj));
			kunmap_atomic(kaddr);
		} else if (cp->cpg_defer_uptodate)
			cp->cpg_ra_used = 1;
		else
			result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
		/*
		 * In older implementations, obdo_refresh_inode is called here
		 * to update the inode because the write might modify the
		 * object info at OST. However, this has been proven useless,
		 * since LVB functions will be called when user space program
		 * tries to retrieve inode attribute. Also, see bug 15909 for
		 * details. -jay
		 */
		if (result == 0)
			cl_page_export(env, pg, 1);
	}
	return result;
}

static int vvp_io_prepare_write(const struct lu_env *env,
				const struct cl_io_slice *ios,
				const struct cl_page_slice *slice,
				unsigned from, unsigned to)
{
	struct cl_object *obj = slice->cpl_obj;
	struct ccc_page *cp = cl2ccc_page(slice);
	struct cl_page *pg = slice->cpl_page;
	struct page *vmpage = cp->cpg_page;

	int result;

	LINVRNT(cl_page_is_vmlocked(env, pg));
	LASSERT(vmpage->mapping->host == ccc_object_inode(obj));

	result = 0;

	CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
	if (!PageUptodate(vmpage)) {
		/*
		 * We're completely overwriting an existing page, so _don't_
		 * set it up to date until commit_write
		 */
		if (from == 0 && to == PAGE_CACHE_SIZE) {
			CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
			POISON_PAGE(page, 0x11);
		} else
			result = vvp_io_prepare_partial(env, ios->cis_io, obj,
							pg, cp, from, to);
	} else
		CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
	return result;
}

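/*
 * Commit a prepared write: add the page to the write-back cache (starting
 * write-out earlier than most file systems do, see the comment below), fall
 * back to a synchronous write when the grant is exhausted (-EDQUOT), mark the
 * inode data-modified and update i_size if the write extended the file.
 */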
static int vvp_io_commit_write(const struct lu_env *env,
			       const struct cl_io_slice *ios,
			       const struct cl_page_slice *slice,
			       unsigned from, unsigned to)
{
	struct cl_object *obj = slice->cpl_obj;
	struct cl_io *io = ios->cis_io;
	struct ccc_page *cp = cl2ccc_page(slice);
	struct cl_page *pg = slice->cpl_page;
	struct inode *inode = ccc_object_inode(obj);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ll_inode_info *lli = ll_i2info(inode);
	struct page *vmpage = cp->cpg_page;

	int result;
	int tallyop;
	loff_t size;

	LINVRNT(cl_page_is_vmlocked(env, pg));
	LASSERT(vmpage->mapping->host == inode);

	LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
	CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);

	/*
	 * queue a write for some time in the future the first time we
	 * dirty the page.
	 *
	 * This is different from what other file systems do: they usually
	 * just mark page (and some of its buffers) dirty and rely on
	 * balance_dirty_pages() to start a write-back. Lustre wants write-back
	 * to be started earlier for the following reasons:
	 *
	 *     (1) with a large number of clients we need to limit the amount
	 *     of cached data on the clients a lot;
	 *
	 *     (2) large compute jobs generally want compute-only then io-only
	 *     and the IO should complete as quickly as possible;
	 *
	 *     (3) IO is batched up to the RPC size and is async until the
	 *     client max cache is hit
	 *     (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
	 *
	 */
	if (!PageDirty(vmpage)) {
		tallyop = LPROC_LL_DIRTY_MISSES;
		result = cl_page_cache_add(env, io, pg, CRT_WRITE);
		if (result == 0) {
			/* page was added into cache successfully. */
			set_page_dirty(vmpage);
			vvp_write_pending(cl2ccc(obj), cp);
		} else if (result == -EDQUOT) {
			pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
			bool need_clip = true;

			/*
			 * Client ran out of disk space grant. Possible
			 * strategies are:
			 *
			 *     (a) do a sync write, renewing grant;
			 *
			 *     (b) stop writing on this stripe, switch to the
			 *     next one.
			 *
			 * (b) is a part of "parallel io" design that is the
			 * ultimate goal. (a) is what "old" client did, and
			 * what the new code continues to do for the time
			 * being.
			 */
			if (last_index > pg->cp_index) {
				to = PAGE_CACHE_SIZE;
				need_clip = false;
			} else if (last_index == pg->cp_index) {
				int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;

				if (to < size_to)
					to = size_to;
			}
			if (need_clip)
				cl_page_clip(env, pg, 0, to);
			result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
			if (result)
				CERROR("Write page %lu of inode %p failed %d\n",
				       pg->cp_index, inode, result);
		}
	} else {
		tallyop = LPROC_LL_DIRTY_HITS;
		result = 0;
	}
	ll_stats_ops_tally(sbi, tallyop, 1);

	/* The inode should be marked DIRTY even if no new page was marked
	 * DIRTY, because the page could have not been flushed between two
	 * modifications. It is important the file is marked DIRTY as soon as
	 * the I/O is done; indeed, when the cache is flushed, the file could
	 * already be closed and it is too late to warn the MDT.
	 * It is acceptable that the file is marked DIRTY even if the I/O is
	 * dropped for some reason before being flushed to the OST.
	 */
	if (result == 0) {
		spin_lock(&lli->lli_lock);
		lli->lli_flags |= LLIF_DATA_MODIFIED;
		spin_unlock(&lli->lli_lock);
	}

	size = cl_offset(obj, pg->cp_index) + to;

	ll_inode_size_lock(inode);
	if (result == 0) {
		if (size > i_size_read(inode)) {
			cl_isize_write_nolock(inode, size);
			CDEBUG(D_VFSTRACE, DFID" updating i_size %lu\n",
			       PFID(lu_object_fid(&obj->co_lu)),
			       (unsigned long)size);
		}
		cl_page_export(env, pg, 1);
	} else {
		if (size > i_size_read(inode))
			cl_page_discard(env, io, pg);
	}
	ll_inode_size_unlock(inode);
	return result;
}

static const struct cl_io_operations vvp_io_ops = {
	.op = {
		[CIT_READ] = {
			.cio_fini = vvp_io_read_fini,
			.cio_lock = vvp_io_read_lock,
			.cio_start = vvp_io_read_start,
			.cio_advance = ccc_io_advance
		},
		[CIT_WRITE] = {
			.cio_fini = vvp_io_fini,
			.cio_lock = vvp_io_write_lock,
			.cio_start = vvp_io_write_start,
			.cio_advance = ccc_io_advance
		},
		[CIT_SETATTR] = {
			.cio_fini = vvp_io_setattr_fini,
			.cio_iter_init = vvp_io_setattr_iter_init,
			.cio_lock = vvp_io_setattr_lock,
			.cio_start = vvp_io_setattr_start,
			.cio_end = vvp_io_setattr_end
		},
		[CIT_FAULT] = {
			.cio_fini = vvp_io_fault_fini,
			.cio_iter_init = vvp_io_fault_iter_init,
			.cio_lock = vvp_io_fault_lock,
			.cio_start = vvp_io_fault_start,
			.cio_end = ccc_io_end
		},
		[CIT_FSYNC] = {
			.cio_start = vvp_io_fsync_start,
			.cio_fini = vvp_io_fini
		},
		[CIT_MISC] = {
			.cio_fini = vvp_io_fini
		}
	},
	.cio_read_page = vvp_io_read_page,
	.cio_prepare_write = vvp_io_prepare_write,
	.cio_commit_write = vvp_io_commit_write
};

int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
		struct cl_io *io)
{
	struct vvp_io *vio = vvp_env_io(env);
	struct ccc_io *cio = ccc_env_io(env);
	struct inode *inode = ccc_object_inode(obj);
	int result;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, DFID
	       " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
	       PFID(lu_object_fid(&obj->co_lu)),
	       io->ci_ignore_layout, io->ci_verify_layout,
	       cio->cui_layout_gen, io->ci_restore_needed);

	CL_IO_SLICE_CLEAN(cio, cui_cl);
	cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
	vio->cui_ra_window_set = 0;
	result = 0;
	if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
		size_t count;
		struct ll_inode_info *lli = ll_i2info(inode);

		count = io->u.ci_rw.crw_count;
		/* "If nbyte is 0, read() will return 0 and have no other
		 * results." -- Single Unix Spec */
		if (count == 0)
			result = 1;
		else
			cio->cui_tot_count = count;

		/* for read/write, we store the jobid in the inode, and
		 * it'll be fetched by osc when building RPC.
		 *
		 * it's not accurate if the file is shared by different
		 * jobs.
		 */
		lustre_get_jobid(lli->lli_jobid);
	} else if (io->ci_type == CIT_SETATTR) {
		if (!cl_io_is_trunc(io))
			io->ci_lockreq = CILR_MANDATORY;
	}

	/* ignore layout change for generic CIT_MISC but not for glimpse.
	 * io context for glimpse must set ci_verify_layout to true,
	 * see cl_glimpse_size0() for details. */
	if (io->ci_type == CIT_MISC && !io->ci_verify_layout)
		io->ci_ignore_layout = 1;

	/* Enqueue the layout lock and get the layout version. We need to do
	 * this even for operations that require opening the file, such as
	 * read and write, because IT_OPEN might not grant the layout lock. */
	if (result == 0 && !io->ci_ignore_layout) {
		result = ll_layout_refresh(inode, &cio->cui_layout_gen);
		if (result == -ENOENT)
			/* If the inode on the MDS has been removed, but the
			 * objects on the OSTs haven't been destroyed (async
			 * unlink), the layout fetch will return -ENOENT; we
			 * ignore this error and continue with the dirty
			 * flush. LU-3230. */
			result = 0;
		if (result < 0)
			CERROR("%s: refresh file layout " DFID " error %d.\n",
			       ll_get_fsname(inode->i_sb, NULL, 0),
			       PFID(lu_object_fid(&obj->co_lu)), result);
	}

	return result;
}

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
				const struct cl_io_slice *slice)
{
	/* Calling just for assertion */
	cl2ccc_io(env, slice);
	return vvp_env_io(env);
}