/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/obd.h"
#include "../include/lustre_lite.h"

#include "vvp_internal.h"
static struct vvp_io *cl2vvp_io(const struct lu_env *env,
				const struct cl_io_slice *slice);
/**
 * True, if \a io is a normal io, False for splice_{read,write}
 */
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
{
	struct vvp_io *vio = vvp_env_io(env);

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	return vio->cui_io_subtype == IO_NORMAL;
}
66 * For swapping layout. The file's layout may have changed.
67 * To avoid populating pages to a wrong stripe, we have to verify the
68 * correctness of layout. It works because swapping layout processes
69 * have to acquire group lock.
71 static bool can_populate_pages(const struct lu_env
*env
, struct cl_io
*io
,
74 struct ll_inode_info
*lli
= ll_i2info(inode
);
75 struct ccc_io
*cio
= ccc_env_io(env
);
78 switch (io
->ci_type
) {
81 /* don't need lock here to check lli_layout_gen as we have held
82 * extent lock and GROUP lock has to hold to swap layout */
83 if (ll_layout_version_get(lli
) != cio
->cui_layout_gen
) {
84 io
->ci_need_restart
= 1;
85 /* this will return application a short read/write */
90 /* fault is okay because we've already had a page. */
/*****************************************************************************
 *
 * io operations.
 *
 */
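/**
 * Records the file mtime at the start of a fault iteration so that
 * vvp_io_fault_start() can detect a concurrent modification of an
 * executable mapping.
 */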
static int vvp_io_fault_iter_init(const struct lu_env *env,
				  const struct cl_io_slice *ios)
{
	struct vvp_io *vio   = cl2vvp_io(env, ios);
	struct inode  *inode = ccc_object_inode(ios->cis_obj);

	LASSERT(inode ==
		cl2ccc_io(env, ios)->cui_fd->fd_file->f_dentry->d_inode);
	vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime);
	return 0;
}
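/**
 * Finalizes an io at the vvp layer: kicks off an HSM restore when the file
 * was found to be released, and re-checks the layout version so that the
 * whole io is restarted if the layout changed underneath it.
 */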
static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct cl_io	 *io  = ios->cis_io;
	struct cl_object *obj = io->ci_obj;
	struct ccc_io	 *cio = cl2ccc_io(env, ios);

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, DFID
	       " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
	       PFID(lu_object_fid(&obj->co_lu)),
	       io->ci_ignore_layout, io->ci_verify_layout,
	       cio->cui_layout_gen, io->ci_restore_needed);

	if (io->ci_restore_needed == 1) {
		int rc;

		/* file was detected release, we need to restore it
		 * before finishing the io
		 */
		rc = ll_layout_restore(ccc_object_inode(obj));
		/* if restore registration failed, no restart,
		 * we will return -ENODATA */
		/* The layout will change after restore, so we need to
		 * block on layout lock hold by the MDT
		 * as MDT will not send new layout in lvb (see LU-3124)
		 * we have to explicitly fetch it, all this will be done
		 * by ll_layout_refresh()
		 */
		if (rc == 0) {
			io->ci_restore_needed = 0;
			io->ci_need_restart = 1;
			io->ci_verify_layout = 1;
		} else {
			io->ci_restore_needed = 1;
			io->ci_need_restart = 0;
			io->ci_verify_layout = 0;
			io->ci_result = rc;
		}
	}

	if (!io->ci_ignore_layout && io->ci_verify_layout) {
		__u32 gen = 0;

		/* check layout version */
		ll_layout_refresh(ccc_object_inode(obj), &gen);
		io->ci_need_restart = cio->cui_layout_gen != gen;
		if (io->ci_need_restart) {
			CDEBUG(D_VFSTRACE,
			       DFID " layout changed from %d to %d.\n",
			       PFID(lu_object_fid(&obj->co_lu)),
			       cio->cui_layout_gen, gen);
			/* today successful restore is the only possible
			 * case */
			/* restore was done, clear restoring state */
			ll_i2info(ccc_object_inode(obj))->lli_flags &=
				~LLIF_FILE_RESTORING;
		}
	}
}
static void vvp_io_fault_fini(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct cl_io   *io   = ios->cis_io;
	struct cl_page *page = io->u.ci_fault.ft_page;

	CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));

	if (page != NULL) {
		lu_ref_del(&page->cp_reference, "fault", io);
		cl_page_put(env, page);
		io->u.ci_fault.ft_page = NULL;
	}
	vvp_io_fini(env, ios);
}
static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
{
	/*
	 * we only want to hold PW locks if the mmap() can generate
	 * writes back to the file and that only happens in shared
	 * writable vmas
	 */
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
		return CLM_WRITE;
	return CLM_READ;
}
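/**
 * The user-level buffer of a read/write may itself be mmapped over Lustre
 * files. Walk the iovec, find the vmas backing it, and add matching DLM
 * extent lock requests for the mapped files, so that the page faults taken
 * while copying data are covered by locks acquired through the regular
 * cl_io lock ordering.
 */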
static int vvp_mmap_locks(const struct lu_env *env,
			  struct ccc_io *vio, struct cl_io *io)
{
	struct ccc_thread_info *cti = ccc_env_info(env);
	struct mm_struct       *mm = current->mm;
	struct vm_area_struct  *vma;
	struct cl_lock_descr   *descr = &cti->cti_descr;
	ldlm_policy_data_t	policy;
	unsigned long		addr;
	ssize_t			count;
	int			result;
	struct iov_iter		i;
	struct iovec		iov;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	if (!cl_is_normalio(env, io))
		return 0;

	if (vio->cui_iter == NULL) /* nfs or loop back device write */
		return 0;

	/* No MM (e.g. NFS)? No vmas too. */
	if (mm == NULL)
		return 0;

	iov_for_each(iov, i, *(vio->cui_iter)) {
		addr = (unsigned long)iov.iov_base;
		count = iov.iov_len;
		if (count == 0)
			continue;

		count += addr & (~CFS_PAGE_MASK);
		addr &= CFS_PAGE_MASK;

		down_read(&mm->mmap_sem);
		while ((vma = our_vma(mm, addr, count)) != NULL) {
			struct inode *inode = vma->vm_file->f_dentry->d_inode;
			int flags = CEF_MUST;

			if (ll_file_nolock(vma->vm_file)) {
				/*
				 * For no lock case, a lockless lock will be
				 * generated.
				 */
				flags = CEF_NEVER;
			}

			/*
			 * XXX: Required lock mode can be weakened: CIT_WRITE
			 * io only ever reads user level buffer, and CIT_READ
			 * only writes on it.
			 */
			policy_from_vma(&policy, vma, addr, count);
			descr->cld_mode = vvp_mode_from_vma(vma);
			descr->cld_obj = ll_i2info(inode)->lli_clob;
			descr->cld_start = cl_index(descr->cld_obj,
						    policy.l_extent.start);
			descr->cld_end = cl_index(descr->cld_obj,
						  policy.l_extent.end);
			descr->cld_enq_flags = flags;
			result = cl_io_lock_alloc_add(env, io, descr);

			CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
			       descr->cld_mode, descr->cld_start,
			       descr->cld_end);

			if (result < 0) {
				up_read(&mm->mmap_sem);
				return result;
			}

			if (vma->vm_end - addr >= count)
				break;

			count -= vma->vm_end - addr;
			addr = vma->vm_end;
		}
		up_read(&mm->mmap_sem);
	}
	return 0;
}
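/**
 * Common locking step for CIT_READ/CIT_WRITE: refresh the iov from the io
 * state, add the locks needed for mmapped user buffers, then take one extent
 * lock of \a mode covering [start, end].
 */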
static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
			  enum cl_lock_mode mode, loff_t start, loff_t end)
{
	struct ccc_io *cio = ccc_env_io(env);
	int ast_flags = 0;
	int result;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	ccc_io_update_iov(env, cio, io);

	if (io->u.ci_rw.crw_nonblock)
		ast_flags |= CEF_NONBLOCK;
	result = vvp_mmap_locks(env, cio, io);
	if (result == 0)
		result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
	return result;
}
static int vvp_io_read_lock(const struct lu_env *env,
			    const struct cl_io_slice *ios)
{
	struct cl_io	     *io  = ios->cis_io;
	struct ll_inode_info *lli = ll_i2info(ccc_object_inode(io->ci_obj));
	int result;

	/* XXX: Layer violation, we shouldn't see lsm at llite level. */
	if (lli->lli_has_smd) /* lsm-less file doesn't need to lock */
		result = vvp_io_rw_lock(env, io, CLM_READ,
					io->u.ci_rd.rd.crw_pos,
					io->u.ci_rd.rd.crw_pos +
					io->u.ci_rd.rd.crw_count - 1);
	else
		result = 0;
	return result;
}
static int vvp_io_fault_lock(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct cl_io  *io  = ios->cis_io;
	struct vvp_io *vio = cl2vvp_io(env, ios);
	/*
	 * XXX LDLM_FL_CBPENDING
	 */
	return ccc_io_one_lock_index
		(env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma),
		 io->u.ci_fault.ft_index, io->u.ci_fault.ft_index);
}
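/**
 * For an O_APPEND write the final offset is only known once i_size is
 * sampled under lock, so the whole [0, OBD_OBJECT_EOF] range is locked;
 * otherwise only the extent actually being written is.
 */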
static int vvp_io_write_lock(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	loff_t start;
	loff_t end;

	if (io->u.ci_wr.wr_append) {
		start = 0;
		end   = OBD_OBJECT_EOF;
	} else {
		start = io->u.ci_wr.wr.crw_pos;
		end   = start + io->u.ci_wr.wr.crw_count - 1;
	}
	return vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
}
static int vvp_io_setattr_iter_init(const struct lu_env *env,
				    const struct cl_io_slice *ios)
{
	return 0;
}
/**
 * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
 *
 * Handles "lockless io" mode when extent locking is done by server.
 */
static int vvp_io_setattr_lock(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct ccc_io *cio = ccc_env_io(env);
	struct cl_io  *io  = ios->cis_io;
	__u64 new_size;
	__u32 enqflags = 0;

	if (cl_io_is_trunc(io)) {
		new_size = io->u.ci_setattr.sa_attr.lvb_size;
		if (new_size == 0)
			enqflags = CEF_DISCARD_DATA;
	} else {
		if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
		     io->u.ci_setattr.sa_attr.lvb_ctime) ||
		    (io->u.ci_setattr.sa_attr.lvb_atime >=
		     io->u.ci_setattr.sa_attr.lvb_ctime))
			return 0;
		new_size = 0;
	}
	cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
	return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
			       new_size, OBD_OBJECT_EOF);
}
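/**
 * Apply a truncate to the VFS view of the file: validate the new size with
 * inode_newsize_ok() and update i_size, all under ll_inode_size_lock().
 */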
static int vvp_do_vmtruncate(struct inode *inode, size_t size)
{
	int result;

	/*
	 * Only ll_inode_size_lock is taken at this level.
	 */
	ll_inode_size_lock(inode);
	result = inode_newsize_ok(inode, size);
	if (result < 0) {
		ll_inode_size_unlock(inode);
		return result;
	}
	truncate_setsize(inode, size);
	ll_inode_size_unlock(inode);
	return result;
}
static int vvp_io_setattr_trunc(const struct lu_env *env,
				const struct cl_io_slice *ios,
				struct inode *inode, loff_t size)
{
	inode_dio_wait(inode);
	return 0;
}
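/**
 * Copy the ctime, and optionally atime/mtime (when ATTR_ATIME_SET or
 * ATTR_MTIME_SET is requested), from the setattr io into the cl_object
 * attributes.
 */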
static int vvp_io_setattr_time(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct cl_io	 *io   = ios->cis_io;
	struct cl_object *obj  = io->ci_obj;
	struct cl_attr	 *attr = ccc_env_thread_attr(env);
	int result;
	unsigned valid = CAT_CTIME;

	cl_object_attr_lock(obj);
	attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
	if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) {
		attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
		valid |= CAT_ATIME;
	}
	if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) {
		attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
		valid |= CAT_MTIME;
	}
	result = cl_object_attr_set(env, obj, attr, valid);
	cl_object_attr_unlock(obj);

	return result;
}
static int vvp_io_setattr_start(const struct lu_env *env,
				const struct cl_io_slice *ios)
{
	struct cl_io *io    = ios->cis_io;
	struct inode *inode = ccc_object_inode(io->ci_obj);
	int result = 0;

	mutex_lock(&inode->i_mutex);
	if (cl_io_is_trunc(io))
		result = vvp_io_setattr_trunc(env, ios, inode,
					io->u.ci_setattr.sa_attr.lvb_size);
	if (result == 0)
		result = vvp_io_setattr_time(env, ios);
	return result;
}
static void vvp_io_setattr_end(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct cl_io *io    = ios->cis_io;
	struct inode *inode = ccc_object_inode(io->ci_obj);

	if (cl_io_is_trunc(io)) {
		/* Truncate in memory pages - they must be clean pages
		 * because osc has already notified to destroy osc_extents. */
		vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
		inode_dio_write_done(inode);
	}
	mutex_unlock(&inode->i_mutex);
}
static void vvp_io_setattr_fini(const struct lu_env *env,
				const struct cl_io_slice *ios)
{
	vvp_io_fini(env, ios);
}
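/**
 * CIT_READ start: clamp the request against the current file size, set up
 * the Lustre read-ahead window (the kernel's own read-ahead is turned off),
 * and hand the actual copy to generic_file_read_iter() or, for splice,
 * generic_file_splice_read().
 */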
static int vvp_io_read_start(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct vvp_io	  *vio   = cl2vvp_io(env, ios);
	struct ccc_io	  *cio   = cl2ccc_io(env, ios);
	struct cl_io	  *io    = ios->cis_io;
	struct cl_object  *obj   = io->ci_obj;
	struct inode	  *inode = ccc_object_inode(obj);
	struct ll_ra_read *bead  = &vio->cui_bead;
	struct file	  *file  = cio->cui_fd->fd_file;

	int	result;
	loff_t	pos = io->u.ci_rd.rd.crw_pos;
	long	cnt = io->u.ci_rd.rd.crw_count;
	long	tot = cio->cui_tot_count;
	int	exceed = 0;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);

	if (!can_populate_pages(env, io, inode))
		return 0;

	result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
	if (result != 0)
		return result;
	else if (exceed != 0)
		goto out;

	LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
			 "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
			 inode->i_ino, cnt, pos, i_size_read(inode));

	/* turn off the kernel's read-ahead */
	cio->cui_fd->fd_file->f_ra.ra_pages = 0;

	/* initialize read-ahead window once per syscall */
	if (!vio->cui_ra_window_set) {
		vio->cui_ra_window_set = 1;
		bead->lrr_start = cl_index(obj, pos);
		/*
		 * XXX: explicit PAGE_CACHE_SIZE
		 */
		bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
		ll_ra_read_in(file, bead);
	}

	/* BUG: 5972 */
	file_accessed(file);
	switch (vio->cui_io_subtype) {
	case IO_NORMAL:
		LASSERT(cio->cui_iocb->ki_pos == pos);
		result = generic_file_read_iter(cio->cui_iocb, cio->cui_iter);
		break;
	case IO_SPLICE:
		result = generic_file_splice_read(file, &pos,
						  vio->u.splice.cui_pipe, cnt,
						  vio->u.splice.cui_flags);
		/* LU-1109: do splice read stripe by stripe otherwise if it
		 * may make nfsd stuck if this read occupied all internal pipe
		 * buffers. */
		io->ci_continue = 0;
		break;
	default:
		CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
		LBUG();
	}

out:
	if (result >= 0) {
		if (result < cnt)
			io->ci_continue = 0;
		io->ci_nob += result;
		ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
				  cio->cui_fd, pos, result, READ);
		result = 0;
	}
	return result;
}
static void vvp_io_read_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct vvp_io *vio = cl2vvp_io(env, ios);
	struct ccc_io *cio = cl2ccc_io(env, ios);

	if (vio->cui_ra_window_set)
		ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);

	vvp_io_fini(env, ios);
}
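/**
 * CIT_WRITE start: for O_APPEND the position is re-read from i_size (kept
 * stable by the whole-file extent lock taken in vvp_io_write_lock()); the
 * data is then pushed through generic_file_write_iter() and the bytes
 * written are accounted in io->ci_nob.
 */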
static int vvp_io_write_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct ccc_io	 *cio   = cl2ccc_io(env, ios);
	struct cl_io	 *io    = ios->cis_io;
	struct cl_object *obj   = io->ci_obj;
	struct inode	 *inode = ccc_object_inode(obj);
	ssize_t result = 0;
	loff_t pos = io->u.ci_wr.wr.crw_pos;
	size_t cnt = io->u.ci_wr.wr.crw_count;

	if (!can_populate_pages(env, io, inode))
		return 0;

	if (cl_io_is_append(io)) {
		/*
		 * PARALLEL IO This has to be changed for parallel IO doing
		 * out-of-order writes.
		 */
		pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
		cio->cui_iocb->ki_pos = pos;
	} else {
		LASSERT(cio->cui_iocb->ki_pos == pos);
	}

	CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);

	if (cio->cui_iter == NULL) /* from a temp io in ll_cl_init(). */
		result = 0;
	else
		result = generic_file_write_iter(cio->cui_iocb, cio->cui_iter);

	if (result > 0) {
		if (result < cnt)
			io->ci_continue = 0;
		io->ci_nob += result;
		ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
				  cio->cui_fd, pos, result, WRITE);
		result = 0;
	}
	return result;
}
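/**
 * Run the generic fault handler, filemap_fault(), for the faulting vma and
 * translate its VM_FAULT_* result into an errno, making sure the returned
 * page is locked before it is handed back in cfio->ft_vmpage.
 */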
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
	struct vm_fault *vmf = cfio->fault.ft_vmf;

	cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);

	if (vmf->page) {
		CDEBUG(D_PAGE,
		       "page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n",
		       vmf->page, vmf->page->mapping, vmf->page->index,
		       (long)vmf->page->flags, page_count(vmf->page),
		       page_private(vmf->page), vmf->virtual_address);
		if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
			lock_page(vmf->page);
			cfio->fault.ft_flags |= VM_FAULT_LOCKED;
		}

		cfio->ft_vmpage = vmf->page;
		return 0;
	}

	if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
		CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
		return -EFAULT;
	}

	if (cfio->fault.ft_flags & VM_FAULT_OOM) {
		CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
		return -ENOMEM;
	}

	if (cfio->fault.ft_flags & VM_FAULT_RETRY)
		return -EAGAIN;

	CERROR("Unknown error in page fault %d!\n", cfio->fault.ft_flags);
	return -EINVAL;
}
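/**
 * CIT_FAULT start: obtain the (locked) vmpage either from the mkwrite caller
 * or through vvp_io_kernel_fault(), check for races with a concurrent
 * truncate, and for mkwrite add the newly dirtied page to the client cache
 * for write-out.
 */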
static int vvp_io_fault_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct vvp_io	    *vio   = cl2vvp_io(env, ios);
	struct cl_io	    *io    = ios->cis_io;
	struct cl_object    *obj   = io->ci_obj;
	struct inode	    *inode = ccc_object_inode(obj);
	struct cl_fault_io  *fio   = &io->u.ci_fault;
	struct vvp_fault_io *cfio  = &vio->u.fault;
	loff_t		     offset;
	int		     result = 0;
	struct page	    *vmpage = NULL;
	struct cl_page	    *page;
	loff_t		     size;
	pgoff_t		     last; /* last page in a file data region */

	if (fio->ft_executable &&
	    LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
		CWARN("binary "DFID
		      " changed while waiting for the page fault lock\n",
		      PFID(lu_object_fid(&obj->co_lu)));

	/* offset of the last byte on the page */
	offset = cl_offset(obj, fio->ft_index + 1) - 1;
	LASSERT(cl_index(obj, offset) == fio->ft_index);
	result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
	if (result != 0)
		return result;

	/* must return locked page */
	if (fio->ft_mkwrite) {
		LASSERT(cfio->ft_vmpage != NULL);
		lock_page(cfio->ft_vmpage);
	} else {
		result = vvp_io_kernel_fault(cfio);
		if (result != 0)
			return result;
	}

	vmpage = cfio->ft_vmpage;
	LASSERT(PageLocked(vmpage));

	if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
		ll_invalidate_page(vmpage);

	size = i_size_read(inode);
	/* Though we have already held a cl_lock upon this page, but
	 * it still can be truncated locally. */
	if (unlikely((vmpage->mapping != inode->i_mapping) ||
		     (page_offset(vmpage) > size))) {
		CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");

		/* return +1 to stop cl_io_loop() and ll_fault() will catch
		 * and retry. */
		GOTO(out, result = +1);
	}

	if (fio->ft_mkwrite) {
		pgoff_t last_index;
		/*
		 * Capture the size while holding the lli_trunc_sem from above
		 * we want to make sure that we complete the mkwrite action
		 * while holding this lock. We need to make sure that we are
		 * not past the end of the file.
		 */
		last_index = cl_index(obj, size - 1);
		if (last_index < fio->ft_index) {
			CDEBUG(D_PAGE,
			       "llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n",
			       vmpage->mapping, fio->ft_index, last_index);
			/*
			 * We need to return if we are
			 * passed the end of the file. This will propagate
			 * up the call stack to ll_page_mkwrite where
			 * we will return VM_FAULT_NOPAGE. Any non-negative
			 * value returned here will be silently
			 * converted to 0. If the vmpage->mapping is null
			 * the error code would be converted back to ENODATA
			 * in ll_page_mkwrite0. Thus we return -ENODATA
			 * to handle both cases
			 */
			GOTO(out, result = -ENODATA);
		}
	}

	page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
	if (IS_ERR(page))
		GOTO(out, result = PTR_ERR(page));

	/* if page is going to be written, we should add this page into cache
	 * earlier. */
	if (fio->ft_mkwrite) {
		wait_on_page_writeback(vmpage);
		if (set_page_dirty(vmpage)) {
			struct ccc_page *cp;

			/* vvp_page_assume() calls wait_on_page_writeback(). */
			cl_page_assume(env, io, page);

			cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
			vvp_write_pending(cl2ccc(obj), cp);

			/* Do not set Dirty bit here so that in case IO is
			 * started before the page is really made dirty, we
			 * still have chance to detect it. */
			result = cl_page_cache_add(env, io, page, CRT_WRITE);
			LASSERT(cl_page_is_owned(page, io));

			vmpage = NULL;
			if (result < 0) {
				cl_page_unmap(env, io, page);
				cl_page_discard(env, io, page);
				cl_page_disown(env, io, page);

				cl_page_put(env, page);

				/* we're in big trouble, what can we do now? */
				if (result == -EDQUOT)
					result = -ENOSPC;
				GOTO(out, result);
			} else
				cl_page_disown(env, io, page);
		}
	}

	last = cl_index(obj, size - 1);
	/*
	 * The ft_index is only used in the case of
	 * a mkwrite action. We need to check
	 * our assertions are correct, since
	 * we should have caught this above
	 */
	LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
	if (fio->ft_index == last)
		/*
		 * Last page is mapped partially.
		 */
		fio->ft_nob = size - cl_offset(obj, fio->ft_index);
	else
		fio->ft_nob = cl_page_size(obj);

	lu_ref_add(&page->cp_reference, "fault", io);
	fio->ft_page = page;

out:
	/* return unlocked vmpage to avoid deadlocking */
	if (vmpage != NULL)
		unlock_page(vmpage);
	cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
	return result;
}
static int vvp_io_fsync_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	/* we should mark TOWRITE bit to each dirty page in radix tree to
	 * verify pages have been written, but this is difficult because of
	 * race. */
	return 0;
}
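/**
 * Called for each page of a read: update the read-ahead state, sanity-check
 * that the page is covered by a lock, and queue it (together with any
 * read-ahead pages) for transfer.
 */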
static int vvp_io_read_page(const struct lu_env *env,
			    const struct cl_io_slice *ios,
			    const struct cl_page_slice *slice)
{
	struct cl_io		  *io     = ios->cis_io;
	struct cl_object	  *obj    = slice->cpl_obj;
	struct ccc_page		  *cp     = cl2ccc_page(slice);
	struct cl_page		  *page   = slice->cpl_page;
	struct inode		  *inode  = ccc_object_inode(obj);
	struct ll_sb_info	  *sbi    = ll_i2sbi(inode);
	struct ll_file_data	  *fd     = cl2ccc_io(env, ios)->cui_fd;
	struct ll_readahead_state *ras    = &fd->fd_ras;
	struct page		  *vmpage = cp->cpg_page;
	struct cl_2queue	  *queue  = &io->ci_queue;
	int rc;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));
	LASSERT(slice->cpl_obj == obj);

	if (sbi->ll_ra_info.ra_max_pages_per_file &&
	    sbi->ll_ra_info.ra_max_pages)
		ras_update(sbi, inode, ras, page->cp_index,
			   cp->cpg_defer_uptodate);

	/* Sanity check whether the page is protected by a lock. */
	rc = cl_page_is_under_lock(env, io, page);
	if (rc != -EBUSY) {
		CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
			       rc == -ENODATA ? "without a lock" :
			       "match failed", rc);
		if (rc != -ENODATA)
			return rc;
	}

	if (cp->cpg_defer_uptodate) {
		cp->cpg_ra_used = 1;
		cl_page_export(env, page, 1);
	}
	/*
	 * Add page into the queue even when it is marked uptodate above.
	 * this will unlock it automatically as part of cl_page_list_disown().
	 */
	cl_2queue_add(queue, page);
	if (sbi->ll_ra_info.ra_max_pages_per_file &&
	    sbi->ll_ra_info.ra_max_pages)
		ll_readahead(env, io, ras,
			     vmpage->mapping, &queue->c2_qin, fd->fd_flags);

	return 0;
}
static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
			    struct cl_page *page, struct ccc_page *cp,
			    enum cl_req_type crt)
{
	struct cl_2queue *queue;
	int result;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	queue = &io->ci_queue;
	cl_2queue_init_page(queue, page);

	result = cl_io_submit_sync(env, io, crt, queue, 0);
	LASSERT(cl_page_is_owned(page, io));

	/*
	 * in CRT_WRITE case page is left locked even in case of
	 * error.
	 */
	cl_page_list_disown(env, io, &queue->c2_qin);
	cl_2queue_fini(env, queue);

	return result;
}
/**
 * Prepare partially written-to page for a write.
 */
static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
				  struct cl_object *obj, struct cl_page *pg,
				  struct ccc_page *cp,
				  unsigned from, unsigned to)
{
	struct cl_attr *attr   = ccc_env_thread_attr(env);
	loff_t		offset = cl_offset(obj, pg->cp_index);
	int		result;

	cl_object_attr_lock(obj);
	result = cl_object_attr_get(env, obj, attr);
	cl_object_attr_unlock(obj);
	if (result == 0) {
		/*
		 * If we are writing to a new page, no need to read old data.
		 * The extent locking will have updated the KMS, and for our
		 * purposes here we can treat it like i_size.
		 */
		if (attr->cat_kms <= offset) {
			char *kaddr = kmap_atomic(cp->cpg_page);

			memset(kaddr, 0, cl_page_size(obj));
			kunmap_atomic(kaddr);
		} else if (cp->cpg_defer_uptodate)
			cp->cpg_ra_used = 1;
		else
			result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
		/*
		 * In older implementations, obdo_refresh_inode is called here
		 * to update the inode because the write might modify the
		 * object info at OST. However, this has been proven useless,
		 * since LVB functions will be called when user space program
		 * tries to retrieve inode attribute. Also, see bug 15909 for
		 * details.
		 */
		if (result == 0)
			cl_page_export(env, pg, 1);
	}
	return result;
}
static int vvp_io_prepare_write(const struct lu_env *env,
				const struct cl_io_slice *ios,
				const struct cl_page_slice *slice,
				unsigned from, unsigned to)
{
	struct cl_object *obj    = slice->cpl_obj;
	struct ccc_page	 *cp     = cl2ccc_page(slice);
	struct cl_page	 *pg     = slice->cpl_page;
	struct page	 *vmpage = cp->cpg_page;

	int result;

	LINVRNT(cl_page_is_vmlocked(env, pg));
	LASSERT(vmpage->mapping->host == ccc_object_inode(obj));

	result = 0;

	CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
	if (!PageUptodate(vmpage)) {
		/*
		 * We're completely overwriting an existing page, so _don't_
		 * set it up to date until commit_write
		 */
		if (from == 0 && to == PAGE_CACHE_SIZE) {
			CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
			POISON_PAGE(page, 0x11);
		} else
			result = vvp_io_prepare_partial(env, ios->cis_io, obj,
							pg, cp, from, to);
	} else
		CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
	return result;
}
static int vvp_io_commit_write(const struct lu_env *env,
			       const struct cl_io_slice *ios,
			       const struct cl_page_slice *slice,
			       unsigned from, unsigned to)
{
	struct cl_object     *obj    = slice->cpl_obj;
	struct cl_io	     *io     = ios->cis_io;
	struct ccc_page	     *cp     = cl2ccc_page(slice);
	struct cl_page	     *pg     = slice->cpl_page;
	struct inode	     *inode  = ccc_object_inode(obj);
	struct ll_sb_info    *sbi    = ll_i2sbi(inode);
	struct ll_inode_info *lli    = ll_i2info(inode);
	struct page	     *vmpage = cp->cpg_page;

	int    result;
	int    tallyop;
	loff_t size;

	LINVRNT(cl_page_is_vmlocked(env, pg));
	LASSERT(vmpage->mapping->host == inode);

	LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
	CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);

	/*
	 * queue a write for some time in the future the first time we
	 * dirty the page.
	 *
	 * This is different from what other file systems do: they usually
	 * just mark page (and some of its buffers) dirty and rely on
	 * balance_dirty_pages() to start a write-back. Lustre wants write-back
	 * to be started earlier for the following reasons:
	 *
	 *     (1) with a large number of clients we need to limit the amount
	 *     of cached data on the clients a lot;
	 *
	 *     (2) large compute jobs generally want compute-only then io-only
	 *     and the IO should complete as quickly as possible;
	 *
	 *     (3) IO is batched up to the RPC size and is async until the
	 *     client max cache is hit
	 *     (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
	 *
	 */
	if (!PageDirty(vmpage)) {
		tallyop = LPROC_LL_DIRTY_MISSES;

		result = cl_page_cache_add(env, io, pg, CRT_WRITE);
		if (result == 0) {
			/* page was added into cache successfully. */
			set_page_dirty(vmpage);
			vvp_write_pending(cl2ccc(obj), cp);
		} else if (result == -EDQUOT) {
			pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
			bool need_clip = true;

			/*
			 * Client ran out of disk space grant. Possible
			 * strategies are:
			 *
			 *     (a) do a sync write, renewing grant;
			 *
			 *     (b) stop writing on this stripe, switch to the
			 *     next one.
			 *
			 * (b) is a part of "parallel io" design that is the
			 * ultimate goal. (a) is what "old" client did, and
			 * what the new code continues to do for the time
			 * being.
			 */
			if (last_index > pg->cp_index) {
				to = PAGE_CACHE_SIZE;
				need_clip = false;
			} else if (last_index == pg->cp_index) {
				int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;

				if (to < size_to)
					to = size_to;
			}
			if (need_clip)
				cl_page_clip(env, pg, 0, to);
			result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
			if (result)
				CERROR("Write page %lu of inode %p failed %d\n",
				       pg->cp_index, inode, result);
		}
	} else {
		tallyop = LPROC_LL_DIRTY_HITS;
		result = 0;
	}
	ll_stats_ops_tally(sbi, tallyop, 1);

	/* Inode should be marked DIRTY even if no new page was marked DIRTY
	 * because page could have been not flushed between 2 modifications.
	 * It is important the file is marked DIRTY as soon as the I/O is done
	 * Indeed, when cache is flushed, file could be already closed and it
	 * is too late to warn the MDT.
	 * It is acceptable that file is marked DIRTY even if I/O is dropped
	 * for some reasons before being flushed to OST.
	 */
	if (result == 0) {
		spin_lock(&lli->lli_lock);
		lli->lli_flags |= LLIF_DATA_MODIFIED;
		spin_unlock(&lli->lli_lock);
	}

	size = cl_offset(obj, pg->cp_index) + to;

	ll_inode_size_lock(inode);
	if (result == 0) {
		if (size > i_size_read(inode)) {
			cl_isize_write_nolock(inode, size);
			CDEBUG(D_VFSTRACE, DFID " updating i_size %lu\n",
			       PFID(lu_object_fid(&obj->co_lu)),
			       (unsigned long)size);
		}
		cl_page_export(env, pg, 1);
	} else {
		if (size > i_size_read(inode))
			cl_page_discard(env, io, pg);
	}
	ll_inode_size_unlock(inode);
	return result;
}
static const struct cl_io_operations vvp_io_ops = {
	.op = {
		[CIT_READ] = {
			.cio_fini      = vvp_io_read_fini,
			.cio_lock      = vvp_io_read_lock,
			.cio_start     = vvp_io_read_start,
			.cio_advance   = ccc_io_advance
		},
		[CIT_WRITE] = {
			.cio_fini      = vvp_io_fini,
			.cio_lock      = vvp_io_write_lock,
			.cio_start     = vvp_io_write_start,
			.cio_advance   = ccc_io_advance
		},
		[CIT_SETATTR] = {
			.cio_fini       = vvp_io_setattr_fini,
			.cio_iter_init  = vvp_io_setattr_iter_init,
			.cio_lock       = vvp_io_setattr_lock,
			.cio_start      = vvp_io_setattr_start,
			.cio_end        = vvp_io_setattr_end
		},
		[CIT_FAULT] = {
			.cio_fini      = vvp_io_fault_fini,
			.cio_iter_init = vvp_io_fault_iter_init,
			.cio_lock      = vvp_io_fault_lock,
			.cio_start     = vvp_io_fault_start,
			.cio_end       = ccc_io_end
		},
		[CIT_FSYNC] = {
			.cio_start  = vvp_io_fsync_start,
			.cio_fini   = vvp_io_fini
		},
		[CIT_MISC] = {
			.cio_fini   = vvp_io_fini
		}
	},
	.cio_read_page     = vvp_io_read_page,
	.cio_prepare_write = vvp_io_prepare_write,
	.cio_commit_write  = vvp_io_commit_write
};
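/**
 * Entry point for io initialization at the vvp layer: installs the io slice
 * with vvp_io_ops and does per-io-type setup, including refreshing the file
 * layout unless this io explicitly ignores layout changes.
 */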
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
		struct cl_io *io)
{
	struct vvp_io *vio   = vvp_env_io(env);
	struct ccc_io *cio   = ccc_env_io(env);
	struct inode  *inode = ccc_object_inode(obj);
	int result;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, DFID
	       " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
	       PFID(lu_object_fid(&obj->co_lu)),
	       io->ci_ignore_layout, io->ci_verify_layout,
	       cio->cui_layout_gen, io->ci_restore_needed);

	CL_IO_SLICE_CLEAN(cio, cui_cl);
	cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
	vio->cui_ra_window_set = 0;
	result = 0;
	if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
		size_t count;
		struct ll_inode_info *lli = ll_i2info(inode);

		count = io->u.ci_rw.crw_count;
		/* "If nbyte is 0, read() will return 0 and have no other
		 *  results."  -- Single Unix Spec */
		if (count == 0)
			result = 1;
		else
			cio->cui_tot_count = count;

		/* for read/write, we store the jobid in the inode, and
		 * it'll be fetched by osc when building RPC.
		 *
		 * it's not accurate if the file is shared by different
		 * jobs.
		 */
		lustre_get_jobid(lli->lli_jobid);
	} else if (io->ci_type == CIT_SETATTR) {
		if (!cl_io_is_trunc(io))
			io->ci_lockreq = CILR_MANDATORY;
	}

	/* ignore layout change for generic CIT_MISC but not for glimpse.
	 * io context for glimpse must set ci_verify_layout to true,
	 * see cl_glimpse_size0() for details. */
	if (io->ci_type == CIT_MISC && !io->ci_verify_layout)
		io->ci_ignore_layout = 1;

	/* Enqueue layout lock and get layout version. We need to do this
	 * even for operations requiring to open file, such as read and write,
	 * because it might not grant layout lock in IT_OPEN. */
	if (result == 0 && !io->ci_ignore_layout) {
		result = ll_layout_refresh(inode, &cio->cui_layout_gen);
		if (result == -ENOENT)
			/* If the inode on MDS has been removed, but the objects
			 * on OSTs haven't been destroyed (async unlink), layout
			 * fetch will return -ENOENT, we'd ignore this error
			 * and continue with dirty flush. LU-3230. */
			result = 0;
		if (result < 0)
			CERROR("%s: refresh file layout " DFID " error %d.\n",
			       ll_get_fsname(inode->i_sb, NULL, 0),
			       PFID(lu_object_fid(&obj->co_lu)), result);
	}

	return result;
}
static struct vvp_io *cl2vvp_io(const struct lu_env *env,
				const struct cl_io_slice *slice)
{
	/* Calling just for assertion */
	cl2ccc_io(env, slice);
	return vvp_env_io(env);
}