/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/obd.h"
#include "../include/lustre_lite.h"

#include "vvp_internal.h"

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
				const struct cl_io_slice *slice);

/**
 * True, if \a io is a normal io, False for splice_{read,write}
 */
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
{
	struct vvp_io *vio = vvp_env_io(env);

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	return vio->cui_io_subtype == IO_NORMAL;
}

/**
 * For swapping layout. The file's layout may have changed.
 * To avoid populating pages to a wrong stripe, we have to verify the
 * correctness of layout. It works because swapping layout processes
 * have to acquire group lock.
 */
static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
			       struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ccc_io *cio = ccc_env_io(env);
	bool rc = true;

	switch (io->ci_type) {
	case CIT_READ:
	case CIT_WRITE:
		/* don't need lock here to check lli_layout_gen as we have held
		 * extent lock and GROUP lock has to hold to swap layout */
		if (ll_layout_version_get(lli) != cio->cui_layout_gen) {
			io->ci_need_restart = 1;
			/* this will return application a short read/write */
			io->ci_continue = 0;
			rc = false;
		}
	case CIT_FAULT:
		/* fault is okay because we've already had a page. */
	default:
		break;
	}

	return rc;
}

/*****************************************************************************
 *
 * io operations.
 *
 */

static int vvp_io_fault_iter_init(const struct lu_env *env,
				  const struct cl_io_slice *ios)
{
	struct vvp_io *vio   = cl2vvp_io(env, ios);
	struct inode  *inode = ccc_object_inode(ios->cis_obj);

	LASSERT(inode ==
		file_inode(cl2ccc_io(env, ios)->cui_fd->fd_file));
	vio->u.fault.ft_mtime = inode->i_mtime.tv_sec;
	return 0;
}

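/*
 * Per-io cleanup for the VVP layer. If the file was released (HSM), a layout
 * restore is requested via ll_layout_restore() and the io is flagged for
 * restart; if layout verification was requested, the layout is refreshed and
 * the io is restarted when its layout generation no longer matches.
 */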
static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct cl_io     *io  = ios->cis_io;
	struct cl_object *obj = io->ci_obj;
	struct ccc_io    *cio = cl2ccc_io(env, ios);

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, DFID
	       " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
	       PFID(lu_object_fid(&obj->co_lu)),
	       io->ci_ignore_layout, io->ci_verify_layout,
	       cio->cui_layout_gen, io->ci_restore_needed);

	if (io->ci_restore_needed == 1) {
		int rc;

		/* file was detected release, we need to restore it
		 * before finishing the io
		 */
		rc = ll_layout_restore(ccc_object_inode(obj));
		/* if restore registration failed, no restart,
		 * we will return -ENODATA */
		/* The layout will change after restore, so we need to
		 * block on layout lock hold by the MDT
		 * as MDT will not send new layout in lvb (see LU-3124)
		 * we have to explicitly fetch it, all this will be done
		 * by ll_layout_refresh()
		 */
		if (rc == 0) {
			io->ci_restore_needed = 0;
			io->ci_need_restart = 1;
			io->ci_verify_layout = 1;
		} else {
			io->ci_restore_needed = 1;
			io->ci_need_restart = 0;
			io->ci_verify_layout = 0;
			io->ci_result = rc;
		}
	}

	if (!io->ci_ignore_layout && io->ci_verify_layout) {
		__u32 gen = 0;

		/* check layout version */
		ll_layout_refresh(ccc_object_inode(obj), &gen);
		io->ci_need_restart = cio->cui_layout_gen != gen;
		if (io->ci_need_restart) {
			CDEBUG(D_VFSTRACE,
			       DFID " layout changed from %d to %d.\n",
			       PFID(lu_object_fid(&obj->co_lu)),
			       cio->cui_layout_gen, gen);
			/* today successful restore is the only possible
			 * case */
			/* restore was done, clear restoring state */
			ll_i2info(ccc_object_inode(obj))->lli_flags &=
				~LLIF_FILE_RESTORING;
		}
	}
}

static void vvp_io_fault_fini(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct cl_io   *io   = ios->cis_io;
	struct cl_page *page = io->u.ci_fault.ft_page;

	CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));

	if (page != NULL) {
		lu_ref_del(&page->cp_reference, "fault", io);
		cl_page_put(env, page);
		io->u.ci_fault.ft_page = NULL;
	}
	vvp_io_fini(env, ios);
}

static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
{
	/*
	 * we only want to hold PW locks if the mmap() can generate
	 * writes back to the file and that only happens in shared
	 * writable vmas
	 */
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
		return CLM_WRITE;
	return CLM_READ;
}

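/*
 * Before a read or write starts, walk the vmas that back the user buffer.
 * For regions that are themselves mmapped from a Lustre file, queue extra
 * DLM extent locks (PR or PW depending on the mapping mode) up front, so
 * that page faults taken while copying data to or from the buffer are
 * served under locks acquired together with the io's own lock.
 */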
static int vvp_mmap_locks(const struct lu_env *env,
			  struct ccc_io *vio, struct cl_io *io)
{
	struct ccc_thread_info *cti = ccc_env_info(env);
	struct mm_struct       *mm = current->mm;
	struct vm_area_struct  *vma;
	struct cl_lock_descr   *descr = &cti->cti_descr;
	ldlm_policy_data_t      policy;
	unsigned long           addr;
	ssize_t                 count;
	int                     result;
	struct iov_iter i;
	struct iovec iov;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	if (!cl_is_normalio(env, io))
		return 0;

	if (vio->cui_iter == NULL) /* nfs or loop back device write */
		return 0;

	/* No MM (e.g. NFS)? No vmas too. */
	if (mm == NULL)
		return 0;

	iov_for_each(iov, i, *(vio->cui_iter)) {
		addr = (unsigned long)iov.iov_base;
		count = iov.iov_len;
		if (count == 0)
			continue;

		count += addr & (~CFS_PAGE_MASK);
		addr &= CFS_PAGE_MASK;

		down_read(&mm->mmap_sem);
		while ((vma = our_vma(mm, addr, count)) != NULL) {
			struct inode *inode = file_inode(vma->vm_file);
			int flags = CEF_MUST;

			if (ll_file_nolock(vma->vm_file)) {
				/*
				 * For no lock case, a lockless lock will be
				 * generated.
				 */
				flags = CEF_NEVER;
			}

			/*
			 * XXX: Required lock mode can be weakened: CIT_WRITE
			 * io only ever reads user level buffer, and CIT_READ
			 * only writes on it.
			 */
			policy_from_vma(&policy, vma, addr, count);
			descr->cld_mode = vvp_mode_from_vma(vma);
			descr->cld_obj = ll_i2info(inode)->lli_clob;
			descr->cld_start = cl_index(descr->cld_obj,
						    policy.l_extent.start);
			descr->cld_end = cl_index(descr->cld_obj,
						  policy.l_extent.end);
			descr->cld_enq_flags = flags;
			result = cl_io_lock_alloc_add(env, io, descr);

			CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
			       descr->cld_mode, descr->cld_start,
			       descr->cld_end);

			if (result < 0) {
				up_read(&mm->mmap_sem);
				return result;
			}

			if (vma->vm_end - addr >= count)
				break;

			count -= vma->vm_end - addr;
			addr = vma->vm_end;
		}
		up_read(&mm->mmap_sem);
	}
	return 0;
}

static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
			  enum cl_lock_mode mode, loff_t start, loff_t end)
{
	struct ccc_io *cio = ccc_env_io(env);
	int result;
	int ast_flags = 0;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	ccc_io_update_iov(env, cio, io);

	if (io->u.ci_rw.crw_nonblock)
		ast_flags |= CEF_NONBLOCK;
	result = vvp_mmap_locks(env, cio, io);
	if (result == 0)
		result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
	return result;
}

static int vvp_io_read_lock(const struct lu_env *env,
			    const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct cl_io_rw_common *rd = &io->u.ci_rd.rd;
	int result;

	result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos,
				rd->crw_pos + rd->crw_count - 1);

	return result;
}

static int vvp_io_fault_lock(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct vvp_io *vio = cl2vvp_io(env, ios);
	/*
	 * XXX LDLM_FL_CBPENDING
	 */
	return ccc_io_one_lock_index
		(env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma),
		 io->u.ci_fault.ft_index, io->u.ci_fault.ft_index);
}

static int vvp_io_write_lock(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	loff_t start;
	loff_t end;

	if (io->u.ci_wr.wr_append) {
		start = 0;
		end   = OBD_OBJECT_EOF;
	} else {
		start = io->u.ci_wr.wr.crw_pos;
		end   = start + io->u.ci_wr.wr.crw_count - 1;
	}
	return vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
}

static int vvp_io_setattr_iter_init(const struct lu_env *env,
				    const struct cl_io_slice *ios)
{
	return 0;
}

/**
 * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
 *
 * Handles "lockless io" mode when extent locking is done by server.
 */
static int vvp_io_setattr_lock(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct ccc_io *cio = ccc_env_io(env);
	struct cl_io  *io  = ios->cis_io;
	__u64 new_size;
	__u32 enqflags = 0;

	if (cl_io_is_trunc(io)) {
		new_size = io->u.ci_setattr.sa_attr.lvb_size;
		if (new_size == 0)
			enqflags = CEF_DISCARD_DATA;
	} else {
		if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
		     io->u.ci_setattr.sa_attr.lvb_ctime) ||
		    (io->u.ci_setattr.sa_attr.lvb_atime >=
		     io->u.ci_setattr.sa_attr.lvb_ctime))
			return 0;
		new_size = 0;
	}
	cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
	return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
			       new_size, OBD_OBJECT_EOF);
}

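/*
 * Truncate the in-memory pages of \a inode to \a size under
 * ll_inode_size_lock(), after validating the new size with
 * inode_newsize_ok().
 */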
static int vvp_do_vmtruncate(struct inode *inode, size_t size)
{
	int result;
	/*
	 * Only ll_inode_size_lock is taken at this level.
	 */
	ll_inode_size_lock(inode);
	result = inode_newsize_ok(inode, size);
	if (result < 0) {
		ll_inode_size_unlock(inode);
		return result;
	}
	truncate_setsize(inode, size);
	ll_inode_size_unlock(inode);
	return result;
}

static int vvp_io_setattr_trunc(const struct lu_env *env,
				const struct cl_io_slice *ios,
				struct inode *inode, loff_t size)
{
	inode_dio_wait(inode);
	return 0;
}

static int vvp_io_setattr_time(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct cl_io     *io   = ios->cis_io;
	struct cl_object *obj  = io->ci_obj;
	struct cl_attr   *attr = ccc_env_thread_attr(env);
	int result;
	unsigned valid = CAT_CTIME;

	cl_object_attr_lock(obj);
	attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
	if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) {
		attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
		valid |= CAT_ATIME;
	}
	if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) {
		attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
		valid |= CAT_MTIME;
	}
	result = cl_object_attr_set(env, obj, attr, valid);
	cl_object_attr_unlock(obj);

	return result;
}

static int vvp_io_setattr_start(const struct lu_env *env,
				const struct cl_io_slice *ios)
{
	struct cl_io *io    = ios->cis_io;
	struct inode *inode = ccc_object_inode(io->ci_obj);
	int result = 0;

	mutex_lock(&inode->i_mutex);
	if (cl_io_is_trunc(io))
		result = vvp_io_setattr_trunc(env, ios, inode,
					io->u.ci_setattr.sa_attr.lvb_size);
	if (result == 0)
		result = vvp_io_setattr_time(env, ios);
	return result;
}

static void vvp_io_setattr_end(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct cl_io *io    = ios->cis_io;
	struct inode *inode = ccc_object_inode(io->ci_obj);

	if (cl_io_is_trunc(io))
		/* Truncate in memory pages - they must be clean pages
		 * because osc has already notified to destroy osc_extents. */
		vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);

	mutex_unlock(&inode->i_mutex);
}

static void vvp_io_setattr_fini(const struct lu_env *env,
				const struct cl_io_slice *ios)
{
	vvp_io_fini(env, ios);
}

static int vvp_io_read_start(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct vvp_io     *vio   = cl2vvp_io(env, ios);
	struct ccc_io     *cio   = cl2ccc_io(env, ios);
	struct cl_io      *io    = ios->cis_io;
	struct cl_object  *obj   = io->ci_obj;
	struct inode      *inode = ccc_object_inode(obj);
	struct ll_ra_read *bead  = &vio->cui_bead;
	struct file       *file  = cio->cui_fd->fd_file;

	int     result;
	loff_t  pos = io->u.ci_rd.rd.crw_pos;
	long    cnt = io->u.ci_rd.rd.crw_count;
	long    tot = cio->cui_tot_count;
	int     exceed = 0;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);

	if (!can_populate_pages(env, io, inode))
		return 0;

	result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
	if (result != 0)
		return result;
	else if (exceed != 0)
		goto out;

	LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
			 "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
			 inode->i_ino, cnt, pos, i_size_read(inode));

	/* turn off the kernel's read-ahead */
	cio->cui_fd->fd_file->f_ra.ra_pages = 0;

	/* initialize read-ahead window once per syscall */
	if (!vio->cui_ra_window_set) {
		vio->cui_ra_window_set = 1;
		bead->lrr_start = cl_index(obj, pos);
		/*
		 * XXX: explicit PAGE_CACHE_SIZE
		 */
		bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
		ll_ra_read_in(file, bead);
	}

	file_accessed(file);
	switch (vio->cui_io_subtype) {
	case IO_NORMAL:
		LASSERT(cio->cui_iocb->ki_pos == pos);
		result = generic_file_read_iter(cio->cui_iocb, cio->cui_iter);
		break;
	case IO_SPLICE:
		result = generic_file_splice_read(file, &pos,
						  vio->u.splice.cui_pipe, cnt,
						  vio->u.splice.cui_flags);
		/* LU-1109: do splice read stripe by stripe otherwise if it
		 * may make nfsd stuck if this read occupied all internal pipe
		 * buffers. */
		io->ci_continue = 0;
		break;
	default:
		CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
		LBUG();
	}

out:
	if (result >= 0) {
		if (result < cnt)
			io->ci_continue = 0;
		io->ci_nob += result;
		ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
				  cio->cui_fd, pos, result, READ);
		result = 0;
	}
	return result;
}

static void vvp_io_read_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct vvp_io *vio = cl2vvp_io(env, ios);
	struct ccc_io *cio = cl2ccc_io(env, ios);

	if (vio->cui_ra_window_set)
		ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);

	vvp_io_fini(env, ios);
}

static int vvp_io_write_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct ccc_io    *cio   = cl2ccc_io(env, ios);
	struct cl_io     *io    = ios->cis_io;
	struct cl_object *obj   = io->ci_obj;
	struct inode     *inode = ccc_object_inode(obj);
	ssize_t result = 0;
	loff_t pos = io->u.ci_wr.wr.crw_pos;
	size_t cnt = io->u.ci_wr.wr.crw_count;

	if (!can_populate_pages(env, io, inode))
		return 0;

	if (cl_io_is_append(io)) {
		/*
		 * PARALLEL IO This has to be changed for parallel IO doing
		 * out-of-order writes.
		 */
		pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
		cio->cui_iocb->ki_pos = pos;
	} else {
		LASSERT(cio->cui_iocb->ki_pos == pos);
	}

	CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);

	if (cio->cui_iter == NULL) /* from a temp io in ll_cl_init(). */
		result = 0;
	else
		result = generic_file_write_iter(cio->cui_iocb, cio->cui_iter);

	if (result > 0) {
		if (result < cnt)
			io->ci_continue = 0;
		io->ci_nob += result;
		ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
				  cio->cui_fd, pos, result, WRITE);
		result = 0;
	}
	return result;
}

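/*
 * Run the kernel fault handler (filemap_fault()) for the faulting address
 * and translate the VM_FAULT_* result into an errno-style return, leaving
 * the locked page in cfio->ft_vmpage on success.
 */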
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
	struct vm_fault *vmf = cfio->fault.ft_vmf;

	cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
	cfio->fault.ft_flags_valid = 1;

	if (vmf->page) {
		CDEBUG(D_PAGE,
		       "page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n",
		       vmf->page, vmf->page->mapping, vmf->page->index,
		       (long)vmf->page->flags, page_count(vmf->page),
		       page_private(vmf->page), vmf->virtual_address);
		if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
			lock_page(vmf->page);
			cfio->fault.ft_flags |= VM_FAULT_LOCKED;
		}

		cfio->ft_vmpage = vmf->page;
		return 0;
	}

	if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
		CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
		return -EFAULT;
	}

	if (cfio->fault.ft_flags & VM_FAULT_OOM) {
		CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
		return -ENOMEM;
	}

	if (cfio->fault.ft_flags & VM_FAULT_RETRY)
		return -EAGAIN;

	CERROR("Unknown error in page fault %d!\n", cfio->fault.ft_flags);
	return -EINVAL;
}

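/*
 * CIT_FAULT start: obtain a locked vmpage (either handed in by the mkwrite
 * caller or produced by vvp_io_kernel_fault()), guard against races with a
 * concurrent truncate, attach the corresponding cl_page, and, for mkwrite,
 * dirty the page and add it to the client cache before the fault returns.
 */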
static int vvp_io_fault_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct vvp_io       *vio   = cl2vvp_io(env, ios);
	struct cl_io        *io    = ios->cis_io;
	struct cl_object    *obj   = io->ci_obj;
	struct inode        *inode = ccc_object_inode(obj);
	struct cl_fault_io  *fio   = &io->u.ci_fault;
	struct vvp_fault_io *cfio  = &vio->u.fault;
	loff_t               offset;
	int                  result = 0;
	struct page         *vmpage = NULL;
	struct cl_page      *page;
	loff_t               size;
	pgoff_t              last; /* last page in a file data region */

	if (fio->ft_executable &&
	    inode->i_mtime.tv_sec != vio->u.fault.ft_mtime)
		CWARN("binary " DFID
		      " changed while waiting for the page fault lock\n",
		      PFID(lu_object_fid(&obj->co_lu)));

	/* offset of the last byte on the page */
	offset = cl_offset(obj, fio->ft_index + 1) - 1;
	LASSERT(cl_index(obj, offset) == fio->ft_index);
	result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
	if (result != 0)
		return result;

	/* must return locked page */
	if (fio->ft_mkwrite) {
		LASSERT(cfio->ft_vmpage != NULL);
		lock_page(cfio->ft_vmpage);
	} else {
		result = vvp_io_kernel_fault(cfio);
		if (result != 0)
			return result;
	}

	vmpage = cfio->ft_vmpage;
	LASSERT(PageLocked(vmpage));

	if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
		ll_invalidate_page(vmpage);

	size = i_size_read(inode);
	/* Though we have already held a cl_lock upon this page, but
	 * it still can be truncated locally. */
	if (unlikely((vmpage->mapping != inode->i_mapping) ||
		     (page_offset(vmpage) > size))) {
		CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");

		/* return +1 to stop cl_io_loop() and ll_fault() will catch
		 * and retry. */
		result = +1;
		goto out;
	}

	if (fio->ft_mkwrite) {
		pgoff_t last_index;
		/*
		 * Capture the size while holding the lli_trunc_sem from above
		 * we want to make sure that we complete the mkwrite action
		 * while holding this lock. We need to make sure that we are
		 * not past the end of the file.
		 */
		last_index = cl_index(obj, size - 1);
		if (last_index < fio->ft_index) {
			CDEBUG(D_PAGE,
			       "llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n",
			       vmpage->mapping, fio->ft_index, last_index);
			/*
			 * We need to return if we are
			 * passed the end of the file. This will propagate
			 * up the call stack to ll_page_mkwrite where
			 * we will return VM_FAULT_NOPAGE. Any non-negative
			 * value returned here will be silently
			 * converted to 0. If the vmpage->mapping is null
			 * the error code would be converted back to ENODATA
			 * in ll_page_mkwrite0. Thus we return -ENODATA
			 * to handle both cases
			 */
			result = -ENODATA;
			goto out;
		}
	}

	page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
	if (IS_ERR(page)) {
		result = PTR_ERR(page);
		goto out;
	}

	/* if page is going to be written, we should add this page into cache
	 * earlier. */
	if (fio->ft_mkwrite) {
		wait_on_page_writeback(vmpage);
		if (set_page_dirty(vmpage)) {
			struct ccc_page *cp;

			/* vvp_page_assume() calls wait_on_page_writeback(). */
			cl_page_assume(env, io, page);

			cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
			vvp_write_pending(cl2ccc(obj), cp);

			/* Do not set Dirty bit here so that in case IO is
			 * started before the page is really made dirty, we
			 * still have chance to detect it. */
			result = cl_page_cache_add(env, io, page, CRT_WRITE);
			LASSERT(cl_page_is_owned(page, io));

			vmpage = NULL;
			if (result < 0) {
				cl_page_unmap(env, io, page);
				cl_page_discard(env, io, page);
				cl_page_disown(env, io, page);

				cl_page_put(env, page);

				/* we're in big trouble, what can we do now? */
				if (result == -EDQUOT)
					result = -ENOSPC;
				goto out;
			} else
				cl_page_disown(env, io, page);
		}
	}

	last = cl_index(obj, size - 1);
	/*
	 * The ft_index is only used in the case of
	 * a mkwrite action. We need to check
	 * our assertions are correct, since
	 * we should have caught this above
	 */
	LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
	if (fio->ft_index == last)
		/*
		 * Last page is mapped partially.
		 */
		fio->ft_nob = size - cl_offset(obj, fio->ft_index);
	else
		fio->ft_nob = cl_page_size(obj);

	lu_ref_add(&page->cp_reference, "fault", io);
	fio->ft_page = page;

out:
	/* return unlocked vmpage to avoid deadlocking */
	if (vmpage != NULL)
		unlock_page(vmpage);

	cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;

	return result;
}

static int vvp_io_fsync_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	/* we should mark TOWRITE bit to each dirty page in radix tree to
	 * verify pages have been written, but this is difficult because of
	 * race. */
	return 0;
}

static int vvp_io_read_page(const struct lu_env *env,
			    const struct cl_io_slice *ios,
			    const struct cl_page_slice *slice)
{
	struct cl_io              *io     = ios->cis_io;
	struct cl_object          *obj    = slice->cpl_obj;
	struct ccc_page           *cp     = cl2ccc_page(slice);
	struct cl_page            *page   = slice->cpl_page;
	struct inode              *inode  = ccc_object_inode(obj);
	struct ll_sb_info         *sbi    = ll_i2sbi(inode);
	struct ll_file_data       *fd     = cl2ccc_io(env, ios)->cui_fd;
	struct ll_readahead_state *ras    = &fd->fd_ras;
	struct page               *vmpage = cp->cpg_page;
	struct cl_2queue          *queue  = &io->ci_queue;
	int rc;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));
	LASSERT(slice->cpl_obj == obj);

	if (sbi->ll_ra_info.ra_max_pages_per_file &&
	    sbi->ll_ra_info.ra_max_pages)
		ras_update(sbi, inode, ras, page->cp_index,
			   cp->cpg_defer_uptodate);

	/* Sanity check whether the page is protected by a lock. */
	rc = cl_page_is_under_lock(env, io, page);
	if (rc != -EBUSY) {
		CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
			       rc == -ENODATA ? "without a lock" :
			       "match failed", rc);
		if (rc != -ENODATA)
			return rc;
	}

	if (cp->cpg_defer_uptodate) {
		cp->cpg_ra_used = 1;
		cl_page_export(env, page, 1);
	}
	/*
	 * Add page into the queue even when it is marked uptodate above.
	 * this will unlock it automatically as part of cl_page_list_disown().
	 */
	cl_page_list_add(&queue->c2_qin, page);
	if (sbi->ll_ra_info.ra_max_pages_per_file &&
	    sbi->ll_ra_info.ra_max_pages)
		ll_readahead(env, io, ras,
			     vmpage->mapping, &queue->c2_qin, fd->fd_flags);

	return 0;
}

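/*
 * Submit a single page synchronously as a read or write through a temporary
 * two-queue. Read pages are disowned on completion; write pages are left
 * locked for the caller.
 */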
static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
			    struct cl_page *page, struct ccc_page *cp,
			    enum cl_req_type crt)
{
	struct cl_2queue *queue;
	int result;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	queue = &io->ci_queue;
	cl_2queue_init_page(queue, page);

	result = cl_io_submit_sync(env, io, crt, queue, 0);
	LASSERT(cl_page_is_owned(page, io));

	if (crt == CRT_READ)
		/*
		 * in CRT_WRITE case page is left locked even in case of
		 * error.
		 */
		cl_page_list_disown(env, io, &queue->c2_qin);
	cl_2queue_fini(env, queue);

	return result;
}

/**
 * Prepare partially written-to page for a write.
 */
static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
				  struct cl_object *obj, struct cl_page *pg,
				  struct ccc_page *cp,
				  unsigned from, unsigned to)
{
	struct cl_attr *attr   = ccc_env_thread_attr(env);
	loff_t          offset = cl_offset(obj, pg->cp_index);
	int             result;

	cl_object_attr_lock(obj);
	result = cl_object_attr_get(env, obj, attr);
	cl_object_attr_unlock(obj);
	if (result == 0) {
		/*
		 * If are writing to a new page, no need to read old data.
		 * The extent locking will have updated the KMS, and for our
		 * purposes here we can treat it like i_size.
		 */
		if (attr->cat_kms <= offset) {
			char *kaddr = kmap_atomic(cp->cpg_page);

			memset(kaddr, 0, cl_page_size(obj));
			kunmap_atomic(kaddr);
		} else if (cp->cpg_defer_uptodate)
			cp->cpg_ra_used = 1;
		else
			result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
		/*
		 * In older implementations, obdo_refresh_inode is called here
		 * to update the inode because the write might modify the
		 * object info at OST. However, this has been proven useless,
		 * since LVB functions will be called when user space program
		 * tries to retrieve inode attribute. Also, see bug 15909 for
		 * details. -jay
		 */
		if (result == 0)
			cl_page_export(env, pg, 1);
	}
	return result;
}

static int vvp_io_prepare_write(const struct lu_env *env,
				const struct cl_io_slice *ios,
				const struct cl_page_slice *slice,
				unsigned from, unsigned to)
{
	struct cl_object *obj    = slice->cpl_obj;
	struct ccc_page  *cp     = cl2ccc_page(slice);
	struct cl_page   *pg     = slice->cpl_page;
	struct page      *vmpage = cp->cpg_page;

	int result;

	LINVRNT(cl_page_is_vmlocked(env, pg));
	LASSERT(vmpage->mapping->host == ccc_object_inode(obj));

	result = 0;

	CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
	if (!PageUptodate(vmpage)) {
		/*
		 * We're completely overwriting an existing page, so _don't_
		 * set it up to date until commit_write
		 */
		if (from == 0 && to == PAGE_CACHE_SIZE) {
			CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
			POISON_PAGE(page, 0x11);
		} else
			result = vvp_io_prepare_partial(env, ios->cis_io, obj,
							pg, cp, from, to);
	} else
		CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
	return result;
}

static int vvp_io_commit_write(const struct lu_env *env,
			       const struct cl_io_slice *ios,
			       const struct cl_page_slice *slice,
			       unsigned from, unsigned to)
{
	struct cl_object     *obj    = slice->cpl_obj;
	struct cl_io         *io     = ios->cis_io;
	struct ccc_page      *cp     = cl2ccc_page(slice);
	struct cl_page       *pg     = slice->cpl_page;
	struct inode         *inode  = ccc_object_inode(obj);
	struct ll_sb_info    *sbi    = ll_i2sbi(inode);
	struct ll_inode_info *lli    = ll_i2info(inode);
	struct page          *vmpage = cp->cpg_page;

	int    result;
	int    tallyop;
	loff_t size;

	LINVRNT(cl_page_is_vmlocked(env, pg));
	LASSERT(vmpage->mapping->host == inode);

	LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
	CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);

	/*
	 * queue a write for some time in the future the first time we
	 * dirty the page.
	 *
	 * This is different from what other file systems do: they usually
	 * just mark page (and some of its buffers) dirty and rely on
	 * balance_dirty_pages() to start a write-back. Lustre wants write-back
	 * to be started earlier for the following reasons:
	 *
	 *     (1) with a large number of clients we need to limit the amount
	 *     of cached data on the clients a lot;
	 *
	 *     (2) large compute jobs generally want compute-only then io-only
	 *     and the IO should complete as quickly as possible;
	 *
	 *     (3) IO is batched up to the RPC size and is async until the
	 *     client max cache is hit
	 *     (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
	 */
	if (!PageDirty(vmpage)) {
		tallyop = LPROC_LL_DIRTY_MISSES;
		result = cl_page_cache_add(env, io, pg, CRT_WRITE);
		if (result == 0) {
			/* page was added into cache successfully. */
			set_page_dirty(vmpage);
			vvp_write_pending(cl2ccc(obj), cp);
		} else if (result == -EDQUOT) {
			pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
			bool need_clip = true;

			/*
			 * Client ran out of disk space grant. Possible
			 * strategies are:
			 *
			 *     (a) do a sync write, renewing grant;
			 *
			 *     (b) stop writing on this stripe, switch to the
			 *     next one.
			 *
			 * (b) is a part of "parallel io" design that is the
			 * ultimate goal. (a) is what "old" client did, and
			 * what the new code continues to do for the time
			 * being.
			 */
			if (last_index > pg->cp_index) {
				to = PAGE_CACHE_SIZE;
				need_clip = false;
			} else if (last_index == pg->cp_index) {
				int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;

				if (to < size_to)
					to = size_to;
			}
			if (need_clip)
				cl_page_clip(env, pg, 0, to);
			result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
			if (result)
				CERROR("Write page %lu of inode %p failed %d\n",
				       pg->cp_index, inode, result);
		}
	} else {
		tallyop = LPROC_LL_DIRTY_HITS;
		result = 0;
	}
	ll_stats_ops_tally(sbi, tallyop, 1);

	/* Inode should be marked DIRTY even if no new page was marked DIRTY
	 * because page could have been not flushed between 2 modifications.
	 * It is important the file is marked DIRTY as soon as the I/O is done
	 * Indeed, when cache is flushed, file could be already closed and it
	 * is too late to warn the MDT.
	 * It is acceptable that file is marked DIRTY even if I/O is dropped
	 * for some reasons before being flushed to OST.
	 */
	if (result == 0) {
		spin_lock(&lli->lli_lock);
		lli->lli_flags |= LLIF_DATA_MODIFIED;
		spin_unlock(&lli->lli_lock);
	}

	size = cl_offset(obj, pg->cp_index) + to;

	ll_inode_size_lock(inode);
	if (result == 0) {
		if (size > i_size_read(inode)) {
			cl_isize_write_nolock(inode, size);
			CDEBUG(D_VFSTRACE, DFID " updating i_size %lu\n",
			       PFID(lu_object_fid(&obj->co_lu)),
			       (unsigned long)size);
		}
		cl_page_export(env, pg, 1);
	} else {
		if (size > i_size_read(inode))
			cl_page_discard(env, io, pg);
	}
	ll_inode_size_unlock(inode);
	return result;
}

static const struct cl_io_operations vvp_io_ops = {
	.op = {
		[CIT_READ] = {
			.cio_fini      = vvp_io_read_fini,
			.cio_lock      = vvp_io_read_lock,
			.cio_start     = vvp_io_read_start,
			.cio_advance   = ccc_io_advance
		},
		[CIT_WRITE] = {
			.cio_fini      = vvp_io_fini,
			.cio_lock      = vvp_io_write_lock,
			.cio_start     = vvp_io_write_start,
			.cio_advance   = ccc_io_advance
		},
		[CIT_SETATTR] = {
			.cio_fini      = vvp_io_setattr_fini,
			.cio_iter_init = vvp_io_setattr_iter_init,
			.cio_lock      = vvp_io_setattr_lock,
			.cio_start     = vvp_io_setattr_start,
			.cio_end       = vvp_io_setattr_end
		},
		[CIT_FAULT] = {
			.cio_fini      = vvp_io_fault_fini,
			.cio_iter_init = vvp_io_fault_iter_init,
			.cio_lock      = vvp_io_fault_lock,
			.cio_start     = vvp_io_fault_start,
			.cio_end       = ccc_io_end
		},
		[CIT_FSYNC] = {
			.cio_start     = vvp_io_fsync_start,
			.cio_fini      = vvp_io_fini
		},
		[CIT_MISC] = {
			.cio_fini      = vvp_io_fini
		}
	},
	.cio_read_page     = vvp_io_read_page,
	.cio_prepare_write = vvp_io_prepare_write,
	.cio_commit_write  = vvp_io_commit_write
};

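/*
 * Initialize the VVP slice of a cl_io: attach vvp_io_ops, record the total
 * byte count and jobid for read/write, and refresh the file layout (taking
 * the layout lock) unless the io explicitly ignores layout changes.
 */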
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
		struct cl_io *io)
{
	struct vvp_io *vio   = vvp_env_io(env);
	struct ccc_io *cio   = ccc_env_io(env);
	struct inode  *inode = ccc_object_inode(obj);
	int result;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, DFID
	       " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
	       PFID(lu_object_fid(&obj->co_lu)),
	       io->ci_ignore_layout, io->ci_verify_layout,
	       cio->cui_layout_gen, io->ci_restore_needed);

	CL_IO_SLICE_CLEAN(cio, cui_cl);
	cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
	vio->cui_ra_window_set = 0;
	result = 0;
	if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
		size_t count;
		struct ll_inode_info *lli = ll_i2info(inode);

		count = io->u.ci_rw.crw_count;
		/* "If nbyte is 0, read() will return 0 and have no other
		 * results." -- Single Unix Spec */
		if (count == 0)
			result = 1;
		else
			cio->cui_tot_count = count;

		/* for read/write, we store the jobid in the inode, and
		 * it'll be fetched by osc when building RPC.
		 *
		 * it's not accurate if the file is shared by different
		 * jobs.
		 */
		lustre_get_jobid(lli->lli_jobid);
	} else if (io->ci_type == CIT_SETATTR) {
		if (!cl_io_is_trunc(io))
			io->ci_lockreq = CILR_MANDATORY;
	}

	/* ignore layout change for generic CIT_MISC but not for glimpse.
	 * io context for glimpse must set ci_verify_layout to true,
	 * see cl_glimpse_size0() for details. */
	if (io->ci_type == CIT_MISC && !io->ci_verify_layout)
		io->ci_ignore_layout = 1;

	/* Enqueue layout lock and get layout version. We need to do this
	 * even for operations requiring to open file, such as read and write,
	 * because it might not grant layout lock in IT_OPEN. */
	if (result == 0 && !io->ci_ignore_layout) {
		result = ll_layout_refresh(inode, &cio->cui_layout_gen);
		if (result == -ENOENT)
			/* If the inode on MDS has been removed, but the objects
			 * on OSTs haven't been destroyed (async unlink), layout
			 * fetch will return -ENOENT, we'd ignore this error
			 * and continue with dirty flush. LU-3230. */
			result = 0;
		if (result < 0)
			CERROR("%s: refresh file layout " DFID " error %d.\n",
			       ll_get_fsname(inode->i_sb, NULL, 0),
			       PFID(lu_object_fid(&obj->co_lu)), result);
	}

	return result;
}

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
				const struct cl_io_slice *slice)
{
	/* Calling just for assertion */
	cl2ccc_io(env, slice);
	return vvp_env_io(env);
}