/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for VVP layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <lustre_lite.h>

#include "vvp_internal.h"

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
                                const struct cl_io_slice *slice);

/**
 * True, if \a io is a normal io, False for sendfile() / splice_{read|write}
 */
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
{
        struct vvp_io *vio = vvp_env_io(env);

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        return vio->cui_io_subtype == IO_NORMAL;
}

/**
 * For swapping layout. The file's layout may have changed.
 * To avoid populating pages to a wrong stripe, we have to verify the
 * correctness of layout. It works because swapping layout processes
 * have to acquire group lock.
 */
static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
                               struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct ccc_io        *cio = ccc_env_io(env);
        bool rc = true;

        switch (io->ci_type) {
        case CIT_READ:
        case CIT_WRITE:
                /* don't need lock here to check lli_layout_gen as we have
                 * held extent lock and GROUP lock has to be held to swap
                 * layout */
                if (lli->lli_layout_gen != cio->cui_layout_gen) {
                        io->ci_need_restart = 1;
                        /* this will return application a short read/write */
                        io->ci_continue = 0;
                        rc = false;
                }
        case CIT_FAULT:
                /* fault is okay because we've already had a page. */
        default:
                break;
        }

        return rc;
}

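/*
 * Illustrative flow (assumed, not from the original source): a layout swap
 * runs under the group lock, so once lli_layout_gen has moved past the
 * cui_layout_gen captured at io init, the stripe mapping used so far is
 * stale. Setting ci_need_restart with ci_continue = 0 hands the application
 * a short read/write and lets the io loop restart against the new layout.
 */
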
/*****************************************************************************
 *
 * io operations.
 *
 */

static int vvp_io_fault_iter_init(const struct lu_env *env,
                                  const struct cl_io_slice *ios)
{
        struct vvp_io *vio   = cl2vvp_io(env, ios);
        struct inode  *inode = ccc_object_inode(ios->cis_obj);

        LASSERT(inode ==
                cl2ccc_io(env, ios)->cui_fd->fd_file->f_dentry->d_inode);
        vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime);
        return 0;
}

static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
        struct cl_io     *io  = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
        struct ccc_io    *cio = cl2ccc_io(env, ios);

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        CDEBUG(D_VFSTRACE, "ignore/verify layout %d/%d, layout version %d.\n",
               io->ci_ignore_layout, io->ci_verify_layout, cio->cui_layout_gen);

        if (!io->ci_ignore_layout && io->ci_verify_layout) {
                __u32 gen = 0;

                /* check layout version */
                ll_layout_refresh(ccc_object_inode(obj), &gen);
                io->ci_need_restart = cio->cui_layout_gen != gen;
                if (io->ci_need_restart)
                        CDEBUG(D_VFSTRACE, "layout changed from %d to %d.\n",
                               cio->cui_layout_gen, gen);
        }
}

static void vvp_io_fault_fini(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct cl_io   *io   = ios->cis_io;
        struct cl_page *page = io->u.ci_fault.ft_page;

        CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));

        if (page != NULL) {
                lu_ref_del(&page->cp_reference, "fault", io);
                cl_page_put(env, page);
                io->u.ci_fault.ft_page = NULL;
        }
        vvp_io_fini(env, ios);
}

enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
{
        /*
         * we only want to hold PW locks if the mmap() can generate
         * writes back to the file and that only happens in shared
         * writable vmas
         */
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
                return CLM_WRITE;
        return CLM_READ;
}

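/*
 * Example (illustrative, not from the original source): a
 * PROT_WRITE|MAP_SHARED mapping can dirty file pages, so it needs CLM_WRITE.
 * A writable MAP_PRIVATE mapping does not: stores go to anonymous
 * copy-on-write pages, so CLM_READ is enough for the faults that read the
 * file data in.
 */
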
static int vvp_mmap_locks(const struct lu_env *env,
                          struct ccc_io *vio, struct cl_io *io)
{
        struct ccc_thread_info *cti = ccc_env_info(env);
        struct mm_struct       *mm  = current->mm;
        struct vm_area_struct  *vma;
        struct cl_lock_descr   *descr = &cti->cti_descr;
        ldlm_policy_data_t      policy;
        unsigned long           addr;
        unsigned long           seg;
        ssize_t                 count;
        int                     result;

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        if (!cl_is_normalio(env, io))
                return 0;

        if (vio->cui_iov == NULL) /* nfs or loop back device write */
                return 0;

        /* No MM (e.g. NFS)? No vmas too. */
        if (mm == NULL)
                return 0;

        for (seg = 0; seg < vio->cui_nrsegs; seg++) {
                const struct iovec *iv = &vio->cui_iov[seg];

                addr = (unsigned long)iv->iov_base;
                count = iv->iov_len;
                if (count == 0)
                        continue;

                count += addr & (~CFS_PAGE_MASK);
                addr &= CFS_PAGE_MASK;

                down_read(&mm->mmap_sem);
                while ((vma = our_vma(mm, addr, count)) != NULL) {
                        struct inode *inode = vma->vm_file->f_dentry->d_inode;
                        int flags = CEF_MUST;

                        if (ll_file_nolock(vma->vm_file)) {
                                /*
                                 * For no lock case, a lockless lock will be
                                 * generated.
                                 */
                                flags = CEF_NEVER;
                        }

                        /*
                         * XXX: Required lock mode can be weakened: CIT_WRITE
                         * io only ever reads user level buffer, and CIT_READ
                         * only writes on it.
                         */
                        policy_from_vma(&policy, vma, addr, count);
                        descr->cld_mode = vvp_mode_from_vma(vma);
                        descr->cld_obj = ll_i2info(inode)->lli_clob;
                        descr->cld_start = cl_index(descr->cld_obj,
                                                    policy.l_extent.start);
                        descr->cld_end = cl_index(descr->cld_obj,
                                                  policy.l_extent.end);
                        descr->cld_enq_flags = flags;
                        result = cl_io_lock_alloc_add(env, io, descr);

                        CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
                               descr->cld_mode, descr->cld_start,
                               descr->cld_end);

                        if (result < 0) {
                                up_read(&mm->mmap_sem);
                                return result;
                        }

                        if (vma->vm_end - addr >= count)
                                break;

                        count -= vma->vm_end - addr;
                        addr = vma->vm_end;
                }
                up_read(&mm->mmap_sem);
        }
        return 0;
}

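/*
 * Illustrative scenario this guards against (assumed, not from the original
 * source):
 *
 *      buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *      read(fd2, buf, len);    (fd2 being another Lustre file)
 *
 * Copying into buf during the read can fault and would then need a DLM lock
 * on the mmapped extent while the read's own extent lock is already held.
 * Enqueuing locks for every mmapped region of the user buffer up front keeps
 * all lock acquisition in one place and avoids faulting under the io lock.
 */
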
static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
                          enum cl_lock_mode mode, loff_t start, loff_t end)
{
        struct ccc_io *cio = ccc_env_io(env);
        int ast_flags = 0;
        int result;

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        ccc_io_update_iov(env, cio, io);

        if (io->u.ci_rw.crw_nonblock)
                ast_flags |= CEF_NONBLOCK;
        result = vvp_mmap_locks(env, cio, io);
        if (result == 0)
                result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
        return result;
}

static int vvp_io_read_lock(const struct lu_env *env,
                            const struct cl_io_slice *ios)
{
        struct cl_io         *io  = ios->cis_io;
        struct ll_inode_info *lli = ll_i2info(ccc_object_inode(io->ci_obj));
        int result;

        /* XXX: Layer violation, we shouldn't see lsm at llite level. */
        if (lli->lli_has_smd) /* lsm-less file doesn't need to lock */
                result = vvp_io_rw_lock(env, io, CLM_READ,
                                        io->u.ci_rd.rd.crw_pos,
                                        io->u.ci_rd.rd.crw_pos +
                                        io->u.ci_rd.rd.crw_count - 1);
        else
                result = 0;
        return result;
}

static int vvp_io_fault_lock(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct cl_io  *io  = ios->cis_io;
        struct vvp_io *vio = cl2vvp_io(env, ios);
        /*
         * XXX LDLM_FL_CBPENDING
         */
        return ccc_io_one_lock_index
                (env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma),
                 io->u.ci_fault.ft_index, io->u.ci_fault.ft_index);
}

static int vvp_io_write_lock(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        loff_t start;
        loff_t end;

        if (io->u.ci_wr.wr_append) {
                start = 0;
                end   = OBD_OBJECT_EOF;
        } else {
                start = io->u.ci_wr.wr.crw_pos;
                end   = start + io->u.ci_wr.wr.crw_count - 1;
        }
        return vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
}

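/*
 * Worked example (illustrative): pwrite(fd, buf, 4096, 8192) locks the byte
 * extent [8192, 12287]. An O_APPEND write instead locks [0, OBD_OBJECT_EOF],
 * because the actual write offset is only known once the cluster-wide file
 * size is, so the whole object has to be covered.
 */
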
static int vvp_io_setattr_iter_init(const struct lu_env *env,
                                    const struct cl_io_slice *ios)
{
        return 0;
}

/**
 * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
 *
 * Handles "lockless io" mode when extent locking is done by server.
 */
static int vvp_io_setattr_lock(const struct lu_env *env,
                               const struct cl_io_slice *ios)
{
        struct ccc_io *cio = ccc_env_io(env);
        struct cl_io  *io  = ios->cis_io;
        __u64 new_size;
        __u32 enqflags = 0;

        if (cl_io_is_trunc(io)) {
                new_size = io->u.ci_setattr.sa_attr.lvb_size;
                if (new_size == 0)
                        enqflags = CEF_DISCARD_DATA;
        } else {
                if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
                     io->u.ci_setattr.sa_attr.lvb_ctime) ||
                    (io->u.ci_setattr.sa_attr.lvb_atime >=
                     io->u.ci_setattr.sa_attr.lvb_ctime))
                        return 0;
                new_size = 0;
        }
        cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
        return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
                               new_size, OBD_OBJECT_EOF);
}

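/*
 * Example (illustrative): ftruncate(fd, 0) gives new_size = 0, so the whole
 * extent [0, OBD_OBJECT_EOF] is locked for write with CEF_DISCARD_DATA and
 * cached pages can be dropped without write-back. A time-only setattr whose
 * new mtime/atime is not older than the new ctime returns early and takes
 * no extent lock at all.
 */
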
static int vvp_do_vmtruncate(struct inode *inode, size_t size)
{
        int result;

        /*
         * Only ll_inode_size_lock is taken at this level.
         */
        ll_inode_size_lock(inode);
        result = inode_newsize_ok(inode, size);
        if (result < 0) {
                ll_inode_size_unlock(inode);
                return result;
        }
        truncate_setsize(inode, size);
        ll_inode_size_unlock(inode);
        return result;
}

static int vvp_io_setattr_trunc(const struct lu_env *env,
                                const struct cl_io_slice *ios,
                                struct inode *inode, loff_t size)
{
        inode_dio_wait(inode);
        return 0;
}

static int vvp_io_setattr_time(const struct lu_env *env,
                               const struct cl_io_slice *ios)
{
        struct cl_io     *io   = ios->cis_io;
        struct cl_object *obj  = io->ci_obj;
        struct cl_attr   *attr = ccc_env_thread_attr(env);
        int result;
        unsigned valid = CAT_CTIME;

        cl_object_attr_lock(obj);
        attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
        if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) {
                attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
                valid |= CAT_ATIME;
        }
        if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) {
                attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
                valid |= CAT_MTIME;
        }
        result = cl_object_attr_set(env, obj, attr, valid);
        cl_object_attr_unlock(obj);

        return result;
}

static int vvp_io_setattr_start(const struct lu_env *env,
                                const struct cl_io_slice *ios)
{
        struct cl_io *io    = ios->cis_io;
        struct inode *inode = ccc_object_inode(io->ci_obj);
        int result = 0;

        mutex_lock(&inode->i_mutex);
        if (cl_io_is_trunc(io))
                result = vvp_io_setattr_trunc(env, ios, inode,
                                        io->u.ci_setattr.sa_attr.lvb_size);
        if (result == 0)
                result = vvp_io_setattr_time(env, ios);
        return result;
}

static void vvp_io_setattr_end(const struct lu_env *env,
                               const struct cl_io_slice *ios)
{
        struct cl_io *io    = ios->cis_io;
        struct inode *inode = ccc_object_inode(io->ci_obj);

        if (cl_io_is_trunc(io)) {
                /* Truncate in memory pages - they must be clean pages
                 * because osc has already notified to destroy osc_extents. */
                vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
                inode_dio_write_done(inode);
        }
        mutex_unlock(&inode->i_mutex);
}

static void vvp_io_setattr_fini(const struct lu_env *env,
                                const struct cl_io_slice *ios)
{
        vvp_io_fini(env, ios);
}

static ssize_t lustre_generic_file_read(struct file *file,
                                        struct ccc_io *vio, loff_t *ppos)
{
        return generic_file_aio_read(vio->cui_iocb, vio->cui_iov,
                                     vio->cui_nrsegs, *ppos);
}

static ssize_t lustre_generic_file_write(struct file *file,
                                         struct ccc_io *vio, loff_t *ppos)
{
        return generic_file_aio_write(vio->cui_iocb, vio->cui_iov,
                                      vio->cui_nrsegs, *ppos);
}

static int vvp_io_read_start(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct vvp_io     *vio   = cl2vvp_io(env, ios);
        struct ccc_io     *cio   = cl2ccc_io(env, ios);
        struct cl_io      *io    = ios->cis_io;
        struct cl_object  *obj   = io->ci_obj;
        struct inode      *inode = ccc_object_inode(obj);
        struct ll_ra_read *bead  = &vio->cui_bead;
        struct file       *file  = cio->cui_fd->fd_file;

        int     result;
        loff_t  pos = io->u.ci_rd.rd.crw_pos;
        long    cnt = io->u.ci_rd.rd.crw_count;
        long    tot = cio->cui_tot_count;
        int     exceed = 0;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);

        if (!can_populate_pages(env, io, inode))
                return 0;

        result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
        if (result != 0)
                return result;
        else if (exceed != 0)
                goto out;

        LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
                         "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
                         inode->i_ino, cnt, pos, i_size_read(inode));

        /* turn off the kernel's read-ahead */
        cio->cui_fd->fd_file->f_ra.ra_pages = 0;

        /* initialize read-ahead window once per syscall */
        if (!vio->cui_ra_window_set) {
                vio->cui_ra_window_set = 1;
                bead->lrr_start = cl_index(obj, pos);
                /*
                 * XXX: explicit PAGE_CACHE_SIZE
                 */
                bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
                ll_ra_read_in(file, bead);
        }

        file_accessed(file);
        switch (vio->cui_io_subtype) {
        case IO_NORMAL:
                result = lustre_generic_file_read(file, cio, &pos);
                break;
        case IO_SPLICE:
                result = generic_file_splice_read(file, &pos,
                                                  vio->u.splice.cui_pipe, cnt,
                                                  vio->u.splice.cui_flags);
                /* LU-1109: do splice read stripe by stripe, otherwise it may
                 * make nfsd stuck if this read occupies all internal pipe
                 * buffers. */
                io->ci_continue = 0;
                break;
        default:
                CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
                LBUG();
        }

out:
        if (result >= 0) {
                if (result < cnt)
                        io->ci_continue = 0;
                io->ci_nob += result;
                ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
                                  cio->cui_fd, pos, result, READ);
        }
        return result;
}

static void vvp_io_read_fini(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct vvp_io *vio = cl2vvp_io(env, ios);
        struct ccc_io *cio = cl2ccc_io(env, ios);

        if (vio->cui_ra_window_set)
                ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);

        vvp_io_fini(env, ios);
}

static int vvp_io_write_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct ccc_io    *cio   = cl2ccc_io(env, ios);
        struct cl_io     *io    = ios->cis_io;
        struct cl_object *obj   = io->ci_obj;
        struct inode     *inode = ccc_object_inode(obj);
        struct file      *file  = cio->cui_fd->fd_file;
        ssize_t result = 0;
        loff_t  pos = io->u.ci_wr.wr.crw_pos;
        size_t  cnt = io->u.ci_wr.wr.crw_count;

        if (!can_populate_pages(env, io, inode))
                return 0;

        if (cl_io_is_append(io)) {
                /*
                 * PARALLEL IO This has to be changed for parallel IO doing
                 * out-of-order writes.
                 */
                pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
                cio->cui_iocb->ki_pos = pos;
        }

        CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);

        if (cio->cui_iov == NULL) /* from a temp io in ll_cl_init(). */
                result = 0;
        else
                result = lustre_generic_file_write(file, cio, &pos);

        if (result > 0) {
                if (result < cnt)
                        io->ci_continue = 0;
                io->ci_nob += result;
                ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
                                  cio->cui_fd, pos, result, WRITE);
                result = 0;
        }
        return result;
}

static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
        struct vm_fault *vmf = cfio->fault.ft_vmf;

        cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);

        if (vmf->page) {
                LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
                               vmf->virtual_address);
                if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
                        lock_page(vmf->page);
                        cfio->fault.ft_flags |= VM_FAULT_LOCKED;
                }

                cfio->ft_vmpage = vmf->page;
                return 0;
        }

        if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
                CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
                return -EFAULT;
        }

        if (cfio->fault.ft_flags & VM_FAULT_OOM) {
                CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
                return -ENOMEM;
        }

        if (cfio->fault.ft_flags & VM_FAULT_RETRY)
                return -EAGAIN;

        CERROR("unknown error in page fault %d!\n", cfio->fault.ft_flags);
        return -EINVAL;
}

static int vvp_io_fault_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct vvp_io       *vio   = cl2vvp_io(env, ios);
        struct cl_io        *io    = ios->cis_io;
        struct cl_object    *obj   = io->ci_obj;
        struct inode        *inode = ccc_object_inode(obj);
        struct cl_fault_io  *fio   = &io->u.ci_fault;
        struct vvp_fault_io *cfio  = &vio->u.fault;
        loff_t               offset;
        int                  result = 0;
        struct page         *vmpage = NULL;
        struct cl_page      *page;
        loff_t               size;
        pgoff_t              last;  /* last page in a file data region */

        if (fio->ft_executable &&
            LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
                CWARN("binary "DFID
                      " changed while waiting for the page fault lock\n",
                      PFID(lu_object_fid(&obj->co_lu)));

        /* offset of the last byte on the page */
        offset = cl_offset(obj, fio->ft_index + 1) - 1;
        LASSERT(cl_index(obj, offset) == fio->ft_index);
        result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
        if (result != 0)
                return result;

        /* must return locked page */
        if (fio->ft_mkwrite) {
                LASSERT(cfio->ft_vmpage != NULL);
                lock_page(cfio->ft_vmpage);
        } else {
                result = vvp_io_kernel_fault(cfio);
                if (result != 0)
                        return result;
        }

        vmpage = cfio->ft_vmpage;
        LASSERT(PageLocked(vmpage));

        if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
                ll_invalidate_page(vmpage);

        size = i_size_read(inode);
        /* Though we have already held a cl_lock upon this page, it
         * still can be truncated locally. */
        if (unlikely((vmpage->mapping != inode->i_mapping) ||
                     (page_offset(vmpage) > size))) {
                CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");

                /* return +1 to stop cl_io_loop() and ll_fault() will catch
                 * and retry. */
                GOTO(out, result = +1);
        }

        if (fio->ft_mkwrite) {
                pgoff_t last_index;
                /*
                 * Capture the size while holding the lli_trunc_sem from above
                 * we want to make sure that we complete the mkwrite action
                 * while holding this lock. We need to make sure that we are
                 * not past the end of the file.
                 */
                last_index = cl_index(obj, size - 1);
                if (last_index < fio->ft_index) {
                        CDEBUG(D_PAGE,
                               "llite: mkwrite and truncate race happened: "
                               "%p: 0x%lx 0x%lx\n",
                               vmpage->mapping, fio->ft_index, last_index);
                        /*
                         * We need to return if we are
                         * past the end of the file. This will propagate
                         * up the call stack to ll_page_mkwrite where
                         * we will return VM_FAULT_NOPAGE. Any non-negative
                         * value returned here will be silently
                         * converted to 0. If the vmpage->mapping is null
                         * the error code would be converted back to ENODATA
                         * in ll_page_mkwrite0. Thus we return -ENODATA
                         * to handle both cases
                         */
                        GOTO(out, result = -ENODATA);
                }
        }

        page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
        if (IS_ERR(page))
                GOTO(out, result = PTR_ERR(page));

        /* if page is going to be written, we should add this page into cache
         * earlier. */
        if (fio->ft_mkwrite) {
                wait_on_page_writeback(vmpage);
                if (set_page_dirty(vmpage)) {
                        struct ccc_page *cp;

                        /* vvp_page_assume() calls wait_on_page_writeback(). */
                        cl_page_assume(env, io, page);

                        cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
                        vvp_write_pending(cl2ccc(obj), cp);

                        /* Do not set Dirty bit here so that in case IO is
                         * started before the page is really made dirty, we
                         * still have chance to detect it. */
                        result = cl_page_cache_add(env, io, page, CRT_WRITE);
                        LASSERT(cl_page_is_owned(page, io));

                        vmpage = NULL;
                        if (result < 0) {
                                cl_page_unmap(env, io, page);
                                cl_page_discard(env, io, page);
                                cl_page_disown(env, io, page);

                                cl_page_put(env, page);

                                /* we're in big trouble, what can we do now? */
                                if (result == -EDQUOT)
                                        result = -ENOSPC;
                                GOTO(out, result);
                        } else
                                cl_page_disown(env, io, page);
                }
        }

        last = cl_index(obj, size - 1);
        /*
         * The ft_index is only used in the case of
         * a mkwrite action. We need to check
         * our assertions are correct, since
         * we should have caught this above
         */
        LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
        if (fio->ft_index == last)
                /*
                 * Last page is mapped partially.
                 */
                fio->ft_nob = size - cl_offset(obj, fio->ft_index);
        else
                fio->ft_nob = cl_page_size(obj);

        lu_ref_add(&page->cp_reference, "fault", io);
        fio->ft_page = page;

out:
        /* return unlocked vmpage to avoid deadlocking */
        if (vmpage != NULL)
                unlock_page(vmpage);
        cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
        return result;
}

static int vvp_io_fsync_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        /* we should mark TOWRITE bit to each dirty page in radix tree to
         * verify pages have been written, but this is difficult because of
         * race. */
        return 0;
}

static int vvp_io_read_page(const struct lu_env *env,
                            const struct cl_io_slice *ios,
                            const struct cl_page_slice *slice)
{
        struct cl_io              *io     = ios->cis_io;
        struct cl_object          *obj    = slice->cpl_obj;
        struct ccc_page           *cp     = cl2ccc_page(slice);
        struct cl_page            *page   = slice->cpl_page;
        struct inode              *inode  = ccc_object_inode(obj);
        struct ll_sb_info         *sbi    = ll_i2sbi(inode);
        struct ll_file_data       *fd     = cl2ccc_io(env, ios)->cui_fd;
        struct ll_readahead_state *ras    = &fd->fd_ras;
        struct page               *vmpage = cp->cpg_page;
        struct cl_2queue          *queue  = &io->ci_queue;
        int rc;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
        LASSERT(slice->cpl_obj == obj);

        if (sbi->ll_ra_info.ra_max_pages_per_file &&
            sbi->ll_ra_info.ra_max_pages)
                ras_update(sbi, inode, ras, page->cp_index,
                           cp->cpg_defer_uptodate);

        /* Sanity check whether the page is protected by a lock. */
        rc = cl_page_is_under_lock(env, io, page);
        if (rc != -EBUSY) {
                CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
                               rc == -ENODATA ? "without a lock" :
                               "match failed", rc);
                if (rc != -ENODATA)
                        return rc;
        }

        if (cp->cpg_defer_uptodate) {
                cp->cpg_ra_used = 1;
                cl_page_export(env, page, 1);
        }
        /*
         * Add page into the queue even when it is marked uptodate above.
         * this will unlock it automatically as part of cl_page_list_disown().
         */
        cl_2queue_add(queue, page);
        if (sbi->ll_ra_info.ra_max_pages_per_file &&
            sbi->ll_ra_info.ra_max_pages)
                ll_readahead(env, io, ras,
                             vmpage->mapping, &queue->c2_qin, fd->fd_flags);

        return 0;
}

static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
                            struct cl_page *page, struct ccc_page *cp,
                            enum cl_req_type crt)
{
        struct cl_2queue *queue;
        int result;

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        queue = &io->ci_queue;
        cl_2queue_init_page(queue, page);

        result = cl_io_submit_sync(env, io, crt, queue, 0);
        LASSERT(cl_page_is_owned(page, io));

        if (crt == CRT_READ)
                /*
                 * in CRT_WRITE case page is left locked even in case of
                 * error.
                 */
                cl_page_list_disown(env, io, &queue->c2_qin);
        cl_2queue_fini(env, queue);

        return result;
}

/**
 * Prepare partially written-to page for a write.
 */
static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
                                  struct cl_object *obj, struct cl_page *pg,
                                  struct ccc_page *cp,
                                  unsigned from, unsigned to)
{
        struct cl_attr *attr   = ccc_env_thread_attr(env);
        loff_t          offset = cl_offset(obj, pg->cp_index);
        int             result;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result == 0) {
                /*
                 * If we are writing to a new page, no need to read old data.
                 * The extent locking will have updated the KMS, and for our
                 * purposes here we can treat it like i_size.
                 */
                if (attr->cat_kms <= offset) {
                        char *kaddr = kmap_atomic(cp->cpg_page);

                        memset(kaddr, 0, cl_page_size(obj));
                        kunmap_atomic(kaddr);
                } else if (cp->cpg_defer_uptodate)
                        cp->cpg_ra_used = 1;
                else
                        result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
                /*
                 * In older implementations, obdo_refresh_inode is called here
                 * to update the inode because the write might modify the
                 * object info at OST. However, this has been proven useless,
                 * since LVB functions will be called when user space program
                 * tries to retrieve inode attribute. Also, see bug 15909 for
                 * details.
                 */
                if (result == 0)
                        cl_page_export(env, pg, 1);
        }
        return result;
}

static int vvp_io_prepare_write(const struct lu_env *env,
                                const struct cl_io_slice *ios,
                                const struct cl_page_slice *slice,
                                unsigned from, unsigned to)
{
        struct cl_object *obj    = slice->cpl_obj;
        struct ccc_page  *cp     = cl2ccc_page(slice);
        struct cl_page   *pg     = slice->cpl_page;
        struct page      *vmpage = cp->cpg_page;

        int result;

        LINVRNT(cl_page_is_vmlocked(env, pg));
        LASSERT(vmpage->mapping->host == ccc_object_inode(obj));

        result = 0;

        CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
        if (!PageUptodate(vmpage)) {
                /*
                 * We're completely overwriting an existing page, so _don't_
                 * set it up to date until commit_write
                 */
                if (from == 0 && to == PAGE_CACHE_SIZE) {
                        CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
                        POISON_PAGE(page, 0x11);
                } else
                        result = vvp_io_prepare_partial(env, ios->cis_io, obj,
                                                        pg, cp, from, to);
        } else
                CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
        return result;
}

static int vvp_io_commit_write(const struct lu_env *env,
                               const struct cl_io_slice *ios,
                               const struct cl_page_slice *slice,
                               unsigned from, unsigned to)
{
        struct cl_object     *obj    = slice->cpl_obj;
        struct cl_io         *io     = ios->cis_io;
        struct ccc_page      *cp     = cl2ccc_page(slice);
        struct cl_page       *pg     = slice->cpl_page;
        struct inode         *inode  = ccc_object_inode(obj);
        struct ll_sb_info    *sbi    = ll_i2sbi(inode);
        struct ll_inode_info *lli    = ll_i2info(inode);
        struct page          *vmpage = cp->cpg_page;

        int    result;
        int    tallyop;
        loff_t size;

        LINVRNT(cl_page_is_vmlocked(env, pg));
        LASSERT(vmpage->mapping->host == inode);

        LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
        CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);

        /*
         * queue a write for some time in the future the first time we
         * dirty the page.
         *
         * This is different from what other file systems do: they usually
         * just mark page (and some of its buffers) dirty and rely on
         * balance_dirty_pages() to start a write-back. Lustre wants write-back
         * to be started earlier for the following reasons:
         *
         *     (1) with a large number of clients we need to limit the amount
         *     of cached data on the clients a lot;
         *
         *     (2) large compute jobs generally want compute-only then io-only
         *     and the IO should complete as quickly as possible;
         *
         *     (3) IO is batched up to the RPC size and is async until the
         *     client max cache is hit
         *     (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
         */
        if (!PageDirty(vmpage)) {
                tallyop = LPROC_LL_DIRTY_MISSES;
                result = cl_page_cache_add(env, io, pg, CRT_WRITE);
                if (result == 0) {
                        /* page was added into cache successfully. */
                        set_page_dirty(vmpage);
                        vvp_write_pending(cl2ccc(obj), cp);
                } else if (result == -EDQUOT) {
                        pgoff_t last_index = i_size_read(inode) >>
                                             PAGE_CACHE_SHIFT;
                        bool need_clip = true;

                        /*
                         * Client ran out of disk space grant. Possible
                         * strategies are:
                         *
                         *     (a) do a sync write, renewing grant;
                         *
                         *     (b) stop writing on this stripe, switch to the
                         *     next one.
                         *
                         * (b) is a part of "parallel io" design that is the
                         * ultimate goal. (a) is what "old" client did, and
                         * what the new code continues to do for the time
                         * being.
                         */
                        if (last_index > pg->cp_index) {
                                to = PAGE_CACHE_SIZE;
                                need_clip = false;
                        } else if (last_index == pg->cp_index) {
                                int size_to = i_size_read(inode) &
                                              ~CFS_PAGE_MASK;
                                if (to < size_to)
                                        to = size_to;
                        }
                        if (need_clip)
                                cl_page_clip(env, pg, 0, to);
                        result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
                        if (result)
                                CERROR("Write page %lu of inode %p failed %d\n",
                                       pg->cp_index, inode, result);
                }
        } else {
                tallyop = LPROC_LL_DIRTY_HITS;
                result = 0;
        }
        ll_stats_ops_tally(sbi, tallyop, 1);

        /* Inode should be marked DIRTY even if no new page was marked DIRTY
         * because page could have been not flushed between 2 modifications.
         * It is important the file is marked DIRTY as soon as the I/O is done
         * Indeed, when cache is flushed, file could be already closed and it
         * is too late to warn the MDT.
         * It is acceptable that file is marked DIRTY even if I/O is dropped
         * for some reasons before being flushed to OST. */
        if (result == 0) {
                spin_lock(&lli->lli_lock);
                lli->lli_flags |= LLIF_DATA_MODIFIED;
                spin_unlock(&lli->lli_lock);
        }

        size = cl_offset(obj, pg->cp_index) + to;

        ll_inode_size_lock(inode);
        if (result == 0) {
                if (size > i_size_read(inode)) {
                        cl_isize_write_nolock(inode, size);
                        CDEBUG(D_VFSTRACE, DFID " updating i_size %lu\n",
                               PFID(lu_object_fid(&obj->co_lu)),
                               (unsigned long)size);
                }
                cl_page_export(env, pg, 1);
        } else {
                if (size > i_size_read(inode))
                        cl_page_discard(env, io, pg);
        }
        ll_inode_size_unlock(inode);
        return result;
}

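/*
 * Worked example for the -EDQUOT path above (illustrative): with 4096-byte
 * pages and i_size = 10000, last_index is 10000 >> PAGE_CACHE_SHIFT = 2.
 * A commit on page 1 lies entirely inside the file, so the sync write covers
 * the full page (to = PAGE_CACHE_SIZE, no clipping). A commit on page 2, the
 * last page, raises 'to' to at least 10000 & ~CFS_PAGE_MASK = 1808 before
 * clipping to [0, to], so the write reaches EOF but never sends bytes past
 * it to the OST.
 */
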
static const struct cl_io_operations vvp_io_ops = {
        .op = {
                [CIT_READ] = {
                        .cio_fini      = vvp_io_read_fini,
                        .cio_lock      = vvp_io_read_lock,
                        .cio_start     = vvp_io_read_start,
                        .cio_advance   = ccc_io_advance
                },
                [CIT_WRITE] = {
                        .cio_fini      = vvp_io_fini,
                        .cio_lock      = vvp_io_write_lock,
                        .cio_start     = vvp_io_write_start,
                        .cio_advance   = ccc_io_advance
                },
                [CIT_SETATTR] = {
                        .cio_fini      = vvp_io_setattr_fini,
                        .cio_iter_init = vvp_io_setattr_iter_init,
                        .cio_lock      = vvp_io_setattr_lock,
                        .cio_start     = vvp_io_setattr_start,
                        .cio_end       = vvp_io_setattr_end
                },
                [CIT_FAULT] = {
                        .cio_fini      = vvp_io_fault_fini,
                        .cio_iter_init = vvp_io_fault_iter_init,
                        .cio_lock      = vvp_io_fault_lock,
                        .cio_start     = vvp_io_fault_start,
                        .cio_end       = ccc_io_end
                },
                [CIT_FSYNC] = {
                        .cio_start     = vvp_io_fsync_start,
                        .cio_fini      = vvp_io_fini
                },
                [CIT_MISC] = {
                        .cio_fini      = vvp_io_fini
                }
        },
        .cio_read_page     = vvp_io_read_page,
        .cio_prepare_write = vvp_io_prepare_write,
        .cio_commit_write  = vvp_io_commit_write
};

int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
                struct cl_io *io)
{
        struct vvp_io *vio   = vvp_env_io(env);
        struct ccc_io *cio   = ccc_env_io(env);
        struct inode  *inode = ccc_object_inode(obj);
        int result;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        CL_IO_SLICE_CLEAN(cio, cui_cl);
        cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
        vio->cui_ra_window_set = 0;
        result = 0;
        if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
                size_t count;
                struct ll_inode_info *lli = ll_i2info(inode);

                count = io->u.ci_rw.crw_count;
                /* "If nbyte is 0, read() will return 0 and have no other
                 *  results." -- Single Unix Spec */
                if (count == 0)
                        result = 1;
                else {
                        cio->cui_tot_count = count;
                        cio->cui_tot_nrsegs = 0;
                }
                /* for read/write, we store the jobid in the inode, and
                 * it'll be fetched by osc when building RPC.
                 *
                 * it's not accurate if the file is shared by different
                 * jobs.
                 */
                lustre_get_jobid(lli->lli_jobid);
        } else if (io->ci_type == CIT_SETATTR) {
                if (!cl_io_is_trunc(io))
                        io->ci_lockreq = CILR_MANDATORY;
        }

        /* ignore layout change for generic CIT_MISC but not for glimpse.
         * io context for glimpse must set ci_verify_layout to true,
         * see cl_glimpse_size0() for details. */
        if (io->ci_type == CIT_MISC && !io->ci_verify_layout)
                io->ci_ignore_layout = 1;

        /* Enqueue layout lock and get layout version. We need to do this
         * even for operations requiring to open file, such as read and write,
         * because it might not grant layout lock in IT_OPEN. */
        if (result == 0 && !io->ci_ignore_layout) {
                result = ll_layout_refresh(inode, &cio->cui_layout_gen);
                if (result == -ENOENT)
                        /* If the inode on MDS has been removed, but the
                         * objects on OSTs haven't been destroyed (async
                         * unlink), layout fetch will return -ENOENT; we'd
                         * ignore this error and continue with dirty flush.
                         * LU-3230. */
                        result = 0;
                if (result < 0)
                        CERROR("%s: refresh file layout " DFID " error %d.\n",
                               ll_get_fsname(inode->i_sb, NULL, 0),
                               PFID(lu_object_fid(&obj->co_lu)), result);
        }

        return result;
}

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
                                const struct cl_io_slice *slice)
{
        /* Calling just for assertion */
        cl2ccc_io(env, slice);
        return vvp_env_io(env);
}