drivers/staging/lustre/lustre/llite/vvp_io.c

/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for VVP layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE


#include "../include/obd.h"
#include "../include/lustre_lite.h"

#include "vvp_internal.h"

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
                                const struct cl_io_slice *slice);

/**
 * Returns true if \a io is a normal io, false for splice_{read,write}.
 */
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
{
        struct vvp_io *vio = vvp_env_io(env);

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        return vio->cui_io_subtype == IO_NORMAL;
}

/**
 * For swapping layout: the file's layout may have changed. To avoid
 * populating pages into the wrong stripe, we have to verify the correctness
 * of the layout. This works because processes swapping the layout have to
 * take the group lock.
 */
static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
                               struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct ccc_io *cio = ccc_env_io(env);
        bool rc = true;

        switch (io->ci_type) {
        case CIT_READ:
        case CIT_WRITE:
                /* no lock is needed here to check lli_layout_gen, as we hold
                 * the extent lock, and the GROUP lock has to be held to swap
                 * the layout */
                if (ll_layout_version_get(lli) != cio->cui_layout_gen) {
                        io->ci_need_restart = 1;
                        /* this will return a short read/write to the
                         * application */
                        io->ci_continue = 0;
                        rc = false;
                }
        case CIT_FAULT:
                /* fault is okay because we already have a page. */
        default:
                break;
        }

        return rc;
}

/*****************************************************************************
 *
 * io operations.
 *
 */

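/*
 * Per-iteration setup for a CIT_FAULT io: remember the file's mtime so that
 * vvp_io_fault_start() can warn if an executable is modified while the
 * fault is being served.
 */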
static int vvp_io_fault_iter_init(const struct lu_env *env,
                                  const struct cl_io_slice *ios)
{
        struct vvp_io *vio = cl2vvp_io(env, ios);
        struct inode *inode = ccc_object_inode(ios->cis_obj);

        LASSERT(inode ==
                cl2ccc_io(env, ios)->cui_fd->fd_file->f_dentry->d_inode);
        vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime);
        return 0;
}

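/*
 * Common io teardown. If the file was detected as released while the io was
 * in flight, a layout restore is registered and the io is flagged for
 * restart; when the layout may have changed, it is refreshed and the
 * generation compared so that cl_io_loop() knows whether to restart.
 */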
static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
        struct ccc_io *cio = cl2ccc_io(env, ios);

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        CDEBUG(D_VFSTRACE, DFID
               " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
               PFID(lu_object_fid(&obj->co_lu)),
               io->ci_ignore_layout, io->ci_verify_layout,
               cio->cui_layout_gen, io->ci_restore_needed);

        if (io->ci_restore_needed == 1) {
                int rc;

                /* the file was detected as released; we need to restore it
                 * before finishing the io
                 */
                rc = ll_layout_restore(ccc_object_inode(obj));
                /* if restore registration failed, no restart;
                 * we will return -ENODATA */
                /* The layout will change after restore, so we need to block
                 * on the layout lock held by the MDT. As the MDT will not
                 * send the new layout in the lvb (see LU-3124), we have to
                 * fetch it explicitly; all of this is done by
                 * ll_layout_refresh()
                 */
                if (rc == 0) {
                        io->ci_restore_needed = 0;
                        io->ci_need_restart = 1;
                        io->ci_verify_layout = 1;
                } else {
                        io->ci_restore_needed = 1;
                        io->ci_need_restart = 0;
                        io->ci_verify_layout = 0;
                        io->ci_result = rc;
                }
        }

        if (!io->ci_ignore_layout && io->ci_verify_layout) {
                __u32 gen = 0;

                /* check layout version */
                ll_layout_refresh(ccc_object_inode(obj), &gen);
                io->ci_need_restart = cio->cui_layout_gen != gen;
                if (io->ci_need_restart) {
                        CDEBUG(D_VFSTRACE,
                               DFID" layout changed from %d to %d.\n",
                               PFID(lu_object_fid(&obj->co_lu)),
                               cio->cui_layout_gen, gen);
                        /* today a successful restore is the only possible
                         * case */
                        /* restore was done, clear restoring state */
                        ll_i2info(ccc_object_inode(obj))->lli_flags &=
                                ~LLIF_FILE_RESTORING;
                }
        }
}

static void vvp_io_fault_fini(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct cl_page *page = io->u.ci_fault.ft_page;

        CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));

        if (page != NULL) {
                lu_ref_del(&page->cp_reference, "fault", io);
                cl_page_put(env, page);
                io->u.ci_fault.ft_page = NULL;
        }
        vvp_io_fini(env, ios);
}

static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
{
        /*
         * We only want to hold PW locks if the mmap() can generate
         * writes back to the file, and that only happens in shared
         * writable vmas.
         */
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
                return CLM_WRITE;
        return CLM_READ;
}

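/*
 * Walk the user buffer described by cui_iter and, for every region that
 * our_vma() reports as memory-mapped, add a cl_lock descriptor for the
 * backing object so the mapped pages cannot change under the io.
 */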
static int vvp_mmap_locks(const struct lu_env *env,
                          struct ccc_io *vio, struct cl_io *io)
{
        struct ccc_thread_info *cti = ccc_env_info(env);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct cl_lock_descr *descr = &cti->cti_descr;
        ldlm_policy_data_t policy;
        unsigned long addr;
        ssize_t count;
        int result;
        struct iov_iter i;
        struct iovec iov;

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        if (!cl_is_normalio(env, io))
                return 0;

        if (vio->cui_iter == NULL) /* NFS or loopback device write */
                return 0;

        /* no mm (e.g. NFS)? then no vmas either. */
        if (mm == NULL)
                return 0;

        iov_for_each(iov, i, *(vio->cui_iter)) {
                addr = (unsigned long)iov.iov_base;
                count = iov.iov_len;
                if (count == 0)
                        continue;

                count += addr & (~CFS_PAGE_MASK);
                addr &= CFS_PAGE_MASK;

                down_read(&mm->mmap_sem);
                while ((vma = our_vma(mm, addr, count)) != NULL) {
                        struct inode *inode = vma->vm_file->f_dentry->d_inode;
                        int flags = CEF_MUST;

                        if (ll_file_nolock(vma->vm_file)) {
                                /*
                                 * For the no-lock case, a lockless lock will
                                 * be generated.
                                 */
                                flags = CEF_NEVER;
                        }

                        /*
                         * XXX: Required lock mode can be weakened: CIT_WRITE
                         * io only ever reads user level buffer, and CIT_READ
                         * only writes on it.
                         */
                        policy_from_vma(&policy, vma, addr, count);
                        descr->cld_mode = vvp_mode_from_vma(vma);
                        descr->cld_obj = ll_i2info(inode)->lli_clob;
                        descr->cld_start = cl_index(descr->cld_obj,
                                                    policy.l_extent.start);
                        descr->cld_end = cl_index(descr->cld_obj,
                                                  policy.l_extent.end);
                        descr->cld_enq_flags = flags;
                        result = cl_io_lock_alloc_add(env, io, descr);

                        CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
                               descr->cld_mode, descr->cld_start,
                               descr->cld_end);

                        if (result < 0) {
                                up_read(&mm->mmap_sem);
                                return result;
                        }

                        if (vma->vm_end - addr >= count)
                                break;

                        count -= vma->vm_end - addr;
                        addr = vma->vm_end;
                }
                up_read(&mm->mmap_sem);
        }
        return 0;
}

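/*
 * Take the extent lock covering [start, end] for a read or write, after
 * first locking any memory-mapped regions of the user buffer via
 * vvp_mmap_locks().
 */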
static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
                          enum cl_lock_mode mode, loff_t start, loff_t end)
{
        struct ccc_io *cio = ccc_env_io(env);
        int result;
        int ast_flags = 0;

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        ccc_io_update_iov(env, cio, io);

        if (io->u.ci_rw.crw_nonblock)
                ast_flags |= CEF_NONBLOCK;
        result = vvp_mmap_locks(env, cio, io);
        if (result == 0)
                result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
        return result;
}

static int vvp_io_read_lock(const struct lu_env *env,
                            const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct ll_inode_info *lli = ll_i2info(ccc_object_inode(io->ci_obj));
        int result;

        /* XXX: Layer violation, we shouldn't see lsm at llite level. */
        if (lli->lli_has_smd) /* lsm-less file doesn't need to lock */
                result = vvp_io_rw_lock(env, io, CLM_READ,
                                        io->u.ci_rd.rd.crw_pos,
                                        io->u.ci_rd.rd.crw_pos +
                                        io->u.ci_rd.rd.crw_count - 1);
        else
                result = 0;
        return result;
}

static int vvp_io_fault_lock(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct vvp_io *vio = cl2vvp_io(env, ios);
        /*
         * XXX LDLM_FL_CBPENDING
         */
        return ccc_io_one_lock_index
                (env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma),
                 io->u.ci_fault.ft_index, io->u.ci_fault.ft_index);
}

static int vvp_io_write_lock(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        loff_t start;
        loff_t end;

        if (io->u.ci_wr.wr_append) {
                start = 0;
                end = OBD_OBJECT_EOF;
        } else {
                start = io->u.ci_wr.wr.crw_pos;
                end = start + io->u.ci_wr.wr.crw_count - 1;
        }
        return vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
}

static int vvp_io_setattr_iter_init(const struct lu_env *env,
                                    const struct cl_io_slice *ios)
{
        return 0;
}

/**
 * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
 *
 * Handles "lockless io" mode when extent locking is done by the server.
 */
static int vvp_io_setattr_lock(const struct lu_env *env,
                               const struct cl_io_slice *ios)
{
        struct ccc_io *cio = ccc_env_io(env);
        struct cl_io *io = ios->cis_io;
        __u64 new_size;
        __u32 enqflags = 0;

        if (cl_io_is_trunc(io)) {
                new_size = io->u.ci_setattr.sa_attr.lvb_size;
                if (new_size == 0)
                        enqflags = CEF_DISCARD_DATA;
        } else {
                if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
                     io->u.ci_setattr.sa_attr.lvb_ctime) ||
                    (io->u.ci_setattr.sa_attr.lvb_atime >=
                     io->u.ci_setattr.sa_attr.lvb_ctime))
                        return 0;
                new_size = 0;
        }
        cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
        return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
                               new_size, OBD_OBJECT_EOF);
}

static int vvp_do_vmtruncate(struct inode *inode, size_t size)
{
        int result;
        /*
         * Only ll_inode_size_lock is taken at this level.
         */
        ll_inode_size_lock(inode);
        result = inode_newsize_ok(inode, size);
        if (result < 0) {
                ll_inode_size_unlock(inode);
                return result;
        }
        truncate_setsize(inode, size);
        ll_inode_size_unlock(inode);
        return result;
}

static int vvp_io_setattr_trunc(const struct lu_env *env,
                                const struct cl_io_slice *ios,
                                struct inode *inode, loff_t size)
{
        inode_dio_wait(inode);
        return 0;
}

static int vvp_io_setattr_time(const struct lu_env *env,
                               const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
        struct cl_attr *attr = ccc_env_thread_attr(env);
        int result;
        unsigned valid = CAT_CTIME;

        cl_object_attr_lock(obj);
        attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
        if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) {
                attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
                valid |= CAT_ATIME;
        }
        if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) {
                attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
                valid |= CAT_MTIME;
        }
        result = cl_object_attr_set(env, obj, attr, valid);
        cl_object_attr_unlock(obj);

        return result;
}

static int vvp_io_setattr_start(const struct lu_env *env,
                                const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct inode *inode = ccc_object_inode(io->ci_obj);
        int result = 0;

        mutex_lock(&inode->i_mutex);
        if (cl_io_is_trunc(io))
                result = vvp_io_setattr_trunc(env, ios, inode,
                                        io->u.ci_setattr.sa_attr.lvb_size);
        if (result == 0)
                result = vvp_io_setattr_time(env, ios);
        return result;
}

static void vvp_io_setattr_end(const struct lu_env *env,
                               const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct inode *inode = ccc_object_inode(io->ci_obj);

        if (cl_io_is_trunc(io)) {
                /* Truncate in-memory pages - they must be clean pages
                 * because osc has already been notified to destroy the
                 * osc_extents. */
                vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
                inode_dio_write_done(inode);
        }
        mutex_unlock(&inode->i_mutex);
}

static void vvp_io_setattr_fini(const struct lu_env *env,
                                const struct cl_io_slice *ios)
{
        vvp_io_fini(env, ios);
}

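/*
 * Start a read: check that the layout has not changed under us, trim the
 * request against the current file size, set up the Lustre read-ahead
 * window, and hand the actual copy to generic_file_read_iter() (or
 * generic_file_splice_read() for splice requests).
 */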
static int vvp_io_read_start(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct vvp_io *vio = cl2vvp_io(env, ios);
        struct ccc_io *cio = cl2ccc_io(env, ios);
        struct cl_io *io = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
        struct inode *inode = ccc_object_inode(obj);
        struct ll_ra_read *bead = &vio->cui_bead;
        struct file *file = cio->cui_fd->fd_file;

        int result;
        loff_t pos = io->u.ci_rd.rd.crw_pos;
        long cnt = io->u.ci_rd.rd.crw_count;
        long tot = cio->cui_tot_count;
        int exceed = 0;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);

        if (!can_populate_pages(env, io, inode))
                return 0;

        result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
        if (result != 0)
                return result;
        else if (exceed != 0)
                goto out;

        LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
                         "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
                         inode->i_ino, cnt, pos, i_size_read(inode));

        /* turn off the kernel's read-ahead */
        cio->cui_fd->fd_file->f_ra.ra_pages = 0;

        /* initialize read-ahead window once per syscall */
        if (!vio->cui_ra_window_set) {
                vio->cui_ra_window_set = 1;
                bead->lrr_start = cl_index(obj, pos);
                /*
                 * XXX: explicit PAGE_CACHE_SIZE
                 */
                bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
                ll_ra_read_in(file, bead);
        }

        /* BUG: 5972 */
        file_accessed(file);
        switch (vio->cui_io_subtype) {
        case IO_NORMAL:
                LASSERT(cio->cui_iocb->ki_pos == pos);
                result = generic_file_read_iter(cio->cui_iocb, cio->cui_iter);
                break;
        case IO_SPLICE:
                result = generic_file_splice_read(file, &pos,
                                                  vio->u.splice.cui_pipe, cnt,
                                                  vio->u.splice.cui_flags);
                /* LU-1109: do splice read stripe by stripe, otherwise it may
                 * make nfsd stuck if this read occupies all of the internal
                 * pipe buffers. */
                io->ci_continue = 0;
                break;
        default:
                CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
                LBUG();
        }

out:
        if (result >= 0) {
                if (result < cnt)
                        io->ci_continue = 0;
                io->ci_nob += result;
                ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
                                  cio->cui_fd, pos, result, READ);
                result = 0;
        }
        return result;
}

static void vvp_io_read_fini(const struct lu_env *env,
                             const struct cl_io_slice *ios)
{
        struct vvp_io *vio = cl2vvp_io(env, ios);
        struct ccc_io *cio = cl2ccc_io(env, ios);

        if (vio->cui_ra_window_set)
                ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);

        vvp_io_fini(env, ios);
}

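/*
 * Start a write: for O_APPEND, rebase the io at the current file size under
 * the locks already taken, then feed the iterator to
 * generic_file_write_iter() and account the bytes written.
 */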
static int vvp_io_write_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct ccc_io *cio = cl2ccc_io(env, ios);
        struct cl_io *io = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
        struct inode *inode = ccc_object_inode(obj);
        ssize_t result = 0;
        loff_t pos = io->u.ci_wr.wr.crw_pos;
        size_t cnt = io->u.ci_wr.wr.crw_count;

        if (!can_populate_pages(env, io, inode))
                return 0;

        if (cl_io_is_append(io)) {
                /*
                 * PARALLEL IO: this has to be changed for parallel IO doing
                 * out-of-order writes.
                 */
                pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
                cio->cui_iocb->ki_pos = pos;
        } else {
                LASSERT(cio->cui_iocb->ki_pos == pos);
        }

        CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);

        if (cio->cui_iter == NULL) /* from a temp io in ll_cl_init(). */
                result = 0;
        else
                result = generic_file_write_iter(cio->cui_iocb, cio->cui_iter);

        if (result > 0) {
                if (result < cnt)
                        io->ci_continue = 0;
                io->ci_nob += result;
                ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
                                  cio->cui_fd, pos, result, WRITE);
                result = 0;
        }
        return result;
}

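/*
 * Let the kernel service the fault through filemap_fault() and translate
 * the VM_FAULT_* result into an errno, leaving the faulted page locked in
 * cfio->ft_vmpage on success.
 */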
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
        struct vm_fault *vmf = cfio->fault.ft_vmf;

        cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);

        if (vmf->page) {
                CDEBUG(D_PAGE,
                       "page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n",
                       vmf->page, vmf->page->mapping, vmf->page->index,
                       (long)vmf->page->flags, page_count(vmf->page),
                       page_private(vmf->page), vmf->virtual_address);
                if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
                        lock_page(vmf->page);
                        cfio->fault.ft_flags |= VM_FAULT_LOCKED;
                }

                cfio->ft_vmpage = vmf->page;
                return 0;
        }

        if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
                CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
                return -EFAULT;
        }

        if (cfio->fault.ft_flags & VM_FAULT_OOM) {
                CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
                return -ENOMEM;
        }

        if (cfio->fault.ft_flags & VM_FAULT_RETRY)
                return -EAGAIN;

        CERROR("Unknown error in page fault %d!\n", cfio->fault.ft_flags);
        return -EINVAL;
}

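/*
 * Handle a page fault (or mkwrite) on a mapped region: obtain the locked
 * vmpage, detect races with truncate, and attach the corresponding cl_page
 * to the io. For mkwrite the page is also dirtied and queued for write-back
 * before the fault returns.
 */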
static int vvp_io_fault_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct vvp_io *vio = cl2vvp_io(env, ios);
        struct cl_io *io = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
        struct inode *inode = ccc_object_inode(obj);
        struct cl_fault_io *fio = &io->u.ci_fault;
        struct vvp_fault_io *cfio = &vio->u.fault;
        loff_t offset;
        int result = 0;
        struct page *vmpage = NULL;
        struct cl_page *page;
        loff_t size;
        pgoff_t last; /* last page in a file data region */

        if (fio->ft_executable &&
            LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
                CWARN("binary "DFID
                      " changed while waiting for the page fault lock\n",
                      PFID(lu_object_fid(&obj->co_lu)));

        /* offset of the last byte on the page */
        offset = cl_offset(obj, fio->ft_index + 1) - 1;
        LASSERT(cl_index(obj, offset) == fio->ft_index);
        result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
        if (result != 0)
                return result;

        /* must return locked page */
        if (fio->ft_mkwrite) {
                LASSERT(cfio->ft_vmpage != NULL);
                lock_page(cfio->ft_vmpage);
        } else {
                result = vvp_io_kernel_fault(cfio);
                if (result != 0)
                        return result;
        }

        vmpage = cfio->ft_vmpage;
        LASSERT(PageLocked(vmpage));

        if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
                ll_invalidate_page(vmpage);

        size = i_size_read(inode);
        /* Though we have already held a cl_lock upon this page, it
         * still can be truncated locally. */
        if (unlikely((vmpage->mapping != inode->i_mapping) ||
                     (page_offset(vmpage) > size))) {
                CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");

                /* return +1 to stop cl_io_loop(); ll_fault() will catch
                 * this and retry. */
                GOTO(out, result = +1);
        }

        if (fio->ft_mkwrite) {
                pgoff_t last_index;
                /*
                 * Capture the size while holding the lli_trunc_sem from
                 * above; we want to make sure that we complete the mkwrite
                 * action while holding this lock. We need to make sure that
                 * we are not past the end of the file.
                 */
                last_index = cl_index(obj, size - 1);
                if (last_index < fio->ft_index) {
                        CDEBUG(D_PAGE,
                               "llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n",
                               vmpage->mapping, fio->ft_index, last_index);
                        /*
                         * We need to return if we are past the end of the
                         * file. This will propagate up the call stack to
                         * ll_page_mkwrite where we will return
                         * VM_FAULT_NOPAGE. Any non-negative value returned
                         * here will be silently converted to 0. If the
                         * vmpage->mapping is null, the error code would be
                         * converted back to ENODATA in ll_page_mkwrite0.
                         * Thus we return -ENODATA to handle both cases.
                         */
                        GOTO(out, result = -ENODATA);
                }
        }

        page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
        if (IS_ERR(page))
                GOTO(out, result = PTR_ERR(page));

        /* if the page is going to be written, we should add it into the
         * cache earlier. */
        if (fio->ft_mkwrite) {
                wait_on_page_writeback(vmpage);
                if (set_page_dirty(vmpage)) {
                        struct ccc_page *cp;

                        /* vvp_page_assume() calls wait_on_page_writeback(). */
                        cl_page_assume(env, io, page);

                        cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
                        vvp_write_pending(cl2ccc(obj), cp);

                        /* Do not set the Dirty bit here so that in case IO is
                         * started before the page is really made dirty, we
                         * still have a chance to detect it. */
                        result = cl_page_cache_add(env, io, page, CRT_WRITE);
                        LASSERT(cl_page_is_owned(page, io));

                        vmpage = NULL;
                        if (result < 0) {
                                cl_page_unmap(env, io, page);
                                cl_page_discard(env, io, page);
                                cl_page_disown(env, io, page);

                                cl_page_put(env, page);

                                /* we're in big trouble, what can we do now? */
                                if (result == -EDQUOT)
                                        result = -ENOSPC;
                                GOTO(out, result);
                        } else
                                cl_page_disown(env, io, page);
                }
        }

        last = cl_index(obj, size - 1);
        /*
         * The ft_index is only used in the case of a mkwrite action. We need
         * to check that our assertions are correct, since we should have
         * caught this above.
         */
        LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
        if (fio->ft_index == last)
                /*
                 * Last page is mapped partially.
                 */
                fio->ft_nob = size - cl_offset(obj, fio->ft_index);
        else
                fio->ft_nob = cl_page_size(obj);

        lu_ref_add(&page->cp_reference, "fault", io);
        fio->ft_page = page;

out:
        /* return an unlocked vmpage to avoid deadlocking */
        if (vmpage != NULL)
                unlock_page(vmpage);
        cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
        return result;
}

static int vvp_io_fsync_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        /* We should set the TOWRITE bit on each dirty page in the radix tree
         * to verify that the pages have been written out, but this is
         * difficult because of races. */
        return 0;
}

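/*
 * Per-page read hook: update the read-ahead state, sanity check the
 * covering lock, queue the page, and let ll_readahead() add further pages
 * to the same queue.
 */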
static int vvp_io_read_page(const struct lu_env *env,
                            const struct cl_io_slice *ios,
                            const struct cl_page_slice *slice)
{
        struct cl_io *io = ios->cis_io;
        struct cl_object *obj = slice->cpl_obj;
        struct ccc_page *cp = cl2ccc_page(slice);
        struct cl_page *page = slice->cpl_page;
        struct inode *inode = ccc_object_inode(obj);
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct ll_file_data *fd = cl2ccc_io(env, ios)->cui_fd;
        struct ll_readahead_state *ras = &fd->fd_ras;
        struct page *vmpage = cp->cpg_page;
        struct cl_2queue *queue = &io->ci_queue;
        int rc;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
        LASSERT(slice->cpl_obj == obj);

        if (sbi->ll_ra_info.ra_max_pages_per_file &&
            sbi->ll_ra_info.ra_max_pages)
                ras_update(sbi, inode, ras, page->cp_index,
                           cp->cpg_defer_uptodate);

        /* Sanity check whether the page is protected by a lock. */
        rc = cl_page_is_under_lock(env, io, page);
        if (rc != -EBUSY) {
                CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
                               rc == -ENODATA ? "without a lock" :
                               "match failed", rc);
                if (rc != -ENODATA)
                        return rc;
        }

        if (cp->cpg_defer_uptodate) {
                cp->cpg_ra_used = 1;
                cl_page_export(env, page, 1);
        }
        /*
         * Add the page into the queue even when it is marked uptodate above.
         * This will unlock it automatically as part of cl_page_list_disown().
         */
        cl_2queue_add(queue, page);
        if (sbi->ll_ra_info.ra_max_pages_per_file &&
            sbi->ll_ra_info.ra_max_pages)
                ll_readahead(env, io, ras,
                             vmpage->mapping, &queue->c2_qin, fd->fd_flags);

        return 0;
}

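/*
 * Submit a single page for synchronous io of type \a crt and wait for the
 * transfer to complete.
 */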
static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
                            struct cl_page *page, struct ccc_page *cp,
                            enum cl_req_type crt)
{
        struct cl_2queue *queue;
        int result;

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        queue = &io->ci_queue;
        cl_2queue_init_page(queue, page);

        result = cl_io_submit_sync(env, io, crt, queue, 0);
        LASSERT(cl_page_is_owned(page, io));

        if (crt == CRT_READ)
                /*
                 * in the CRT_WRITE case the page is left locked even in
                 * case of error.
                 */
                cl_page_list_disown(env, io, &queue->c2_qin);
        cl_2queue_fini(env, queue);

        return result;
}

/**
 * Prepare a partially written-to page for a write.
 */
static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
                                  struct cl_object *obj, struct cl_page *pg,
                                  struct ccc_page *cp,
                                  unsigned from, unsigned to)
{
        struct cl_attr *attr = ccc_env_thread_attr(env);
        loff_t offset = cl_offset(obj, pg->cp_index);
        int result;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result == 0) {
                /*
                 * If we are writing to a new page, there is no need to read
                 * the old data. The extent locking will have updated the
                 * KMS, and for our purposes here we can treat it like
                 * i_size.
                 */
                if (attr->cat_kms <= offset) {
                        char *kaddr = kmap_atomic(cp->cpg_page);

                        memset(kaddr, 0, cl_page_size(obj));
                        kunmap_atomic(kaddr);
                } else if (cp->cpg_defer_uptodate)
                        cp->cpg_ra_used = 1;
                else
                        result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
                /*
                 * In older implementations, obdo_refresh_inode was called
                 * here to update the inode because the write might modify
                 * the object info at the OST. However, this has been proven
                 * useless, since LVB functions will be called when user
                 * space programs try to retrieve inode attributes. Also,
                 * see bug 15909 for details. -jay
                 */
                if (result == 0)
                        cl_page_export(env, pg, 1);
        }
        return result;
}

static int vvp_io_prepare_write(const struct lu_env *env,
                                const struct cl_io_slice *ios,
                                const struct cl_page_slice *slice,
                                unsigned from, unsigned to)
{
        struct cl_object *obj = slice->cpl_obj;
        struct ccc_page *cp = cl2ccc_page(slice);
        struct cl_page *pg = slice->cpl_page;
        struct page *vmpage = cp->cpg_page;

        int result;

        LINVRNT(cl_page_is_vmlocked(env, pg));
        LASSERT(vmpage->mapping->host == ccc_object_inode(obj));

        result = 0;

        CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
        if (!PageUptodate(vmpage)) {
                /*
                 * We're completely overwriting an existing page, so _don't_
                 * set it up to date until commit_write.
                 */
                if (from == 0 && to == PAGE_CACHE_SIZE) {
                        CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
                        POISON_PAGE(page, 0x11);
                } else
                        result = vvp_io_prepare_partial(env, ios->cis_io, obj,
                                                        pg, cp, from, to);
        } else
                CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
        return result;
}

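/*
 * Finish a buffered write to one page: add the newly dirtied page to the
 * client's write-back cache (falling back to a synchronous write when the
 * grant runs out), mark the inode data-modified, and update the file size
 * if the write extended it.
 */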
static int vvp_io_commit_write(const struct lu_env *env,
                               const struct cl_io_slice *ios,
                               const struct cl_page_slice *slice,
                               unsigned from, unsigned to)
{
        struct cl_object *obj = slice->cpl_obj;
        struct cl_io *io = ios->cis_io;
        struct ccc_page *cp = cl2ccc_page(slice);
        struct cl_page *pg = slice->cpl_page;
        struct inode *inode = ccc_object_inode(obj);
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct ll_inode_info *lli = ll_i2info(inode);
        struct page *vmpage = cp->cpg_page;

        int result;
        int tallyop;
        loff_t size;

        LINVRNT(cl_page_is_vmlocked(env, pg));
        LASSERT(vmpage->mapping->host == inode);

        LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
        CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);

        /*
         * Queue a write for some time in the future the first time we
         * dirty the page.
         *
         * This is different from what other file systems do: they usually
         * just mark the page (and some of its buffers) dirty and rely on
         * balance_dirty_pages() to start a write-back. Lustre wants
         * write-back to be started earlier for the following reasons:
         *
         *     (1) with a large number of clients we need to limit the amount
         *     of cached data on the clients a lot;
         *
         *     (2) large compute jobs generally want compute-only then io-only
         *     and the IO should complete as quickly as possible;
         *
         *     (3) IO is batched up to the RPC size and is async until the
         *     client max cache is hit
         *     (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
         *
         */
        if (!PageDirty(vmpage)) {
                tallyop = LPROC_LL_DIRTY_MISSES;
                result = cl_page_cache_add(env, io, pg, CRT_WRITE);
                if (result == 0) {
                        /* page was added into cache successfully. */
                        set_page_dirty(vmpage);
                        vvp_write_pending(cl2ccc(obj), cp);
                } else if (result == -EDQUOT) {
                        pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
                        bool need_clip = true;

                        /*
                         * Client ran out of disk space grant. Possible
                         * strategies are:
                         *
                         *     (a) do a sync write, renewing grant;
                         *
                         *     (b) stop writing on this stripe, switch to the
                         *     next one.
                         *
                         * (b) is a part of "parallel io" design that is the
                         * ultimate goal. (a) is what "old" client did, and
                         * what the new code continues to do for the time
                         * being.
                         */
                        if (last_index > pg->cp_index) {
                                to = PAGE_CACHE_SIZE;
                                need_clip = false;
                        } else if (last_index == pg->cp_index) {
                                int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;

                                if (to < size_to)
                                        to = size_to;
                        }
                        if (need_clip)
                                cl_page_clip(env, pg, 0, to);
                        result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
                        if (result)
                                CERROR("Write page %lu of inode %p failed %d\n",
                                       pg->cp_index, inode, result);
                }
        } else {
                tallyop = LPROC_LL_DIRTY_HITS;
                result = 0;
        }
        ll_stats_ops_tally(sbi, tallyop, 1);

        /* The inode should be marked DIRTY even if no new page was marked
         * DIRTY, because the page could have been left unflushed between
         * two modifications.
         * It is important that the file is marked DIRTY as soon as the I/O
         * is done. Indeed, when the cache is flushed, the file could already
         * be closed and it would be too late to warn the MDT.
         * It is acceptable that the file is marked DIRTY even if the I/O is
         * dropped for some reason before being flushed to the OST.
         */
        if (result == 0) {
                spin_lock(&lli->lli_lock);
                lli->lli_flags |= LLIF_DATA_MODIFIED;
                spin_unlock(&lli->lli_lock);
        }

        size = cl_offset(obj, pg->cp_index) + to;

        ll_inode_size_lock(inode);
        if (result == 0) {
                if (size > i_size_read(inode)) {
                        cl_isize_write_nolock(inode, size);
                        CDEBUG(D_VFSTRACE, DFID" updating i_size %lu\n",
                               PFID(lu_object_fid(&obj->co_lu)),
                               (unsigned long)size);
                }
                cl_page_export(env, pg, 1);
        } else {
                if (size > i_size_read(inode))
                        cl_page_discard(env, io, pg);
        }
        ll_inode_size_unlock(inode);
        return result;
}

static const struct cl_io_operations vvp_io_ops = {
        .op = {
                [CIT_READ] = {
                        .cio_fini = vvp_io_read_fini,
                        .cio_lock = vvp_io_read_lock,
                        .cio_start = vvp_io_read_start,
                        .cio_advance = ccc_io_advance
                },
                [CIT_WRITE] = {
                        .cio_fini = vvp_io_fini,
                        .cio_lock = vvp_io_write_lock,
                        .cio_start = vvp_io_write_start,
                        .cio_advance = ccc_io_advance
                },
                [CIT_SETATTR] = {
                        .cio_fini = vvp_io_setattr_fini,
                        .cio_iter_init = vvp_io_setattr_iter_init,
                        .cio_lock = vvp_io_setattr_lock,
                        .cio_start = vvp_io_setattr_start,
                        .cio_end = vvp_io_setattr_end
                },
                [CIT_FAULT] = {
                        .cio_fini = vvp_io_fault_fini,
                        .cio_iter_init = vvp_io_fault_iter_init,
                        .cio_lock = vvp_io_fault_lock,
                        .cio_start = vvp_io_fault_start,
                        .cio_end = ccc_io_end
                },
                [CIT_FSYNC] = {
                        .cio_start = vvp_io_fsync_start,
                        .cio_fini = vvp_io_fini
                },
                [CIT_MISC] = {
                        .cio_fini = vvp_io_fini
                }
        },
        .cio_read_page = vvp_io_read_page,
        .cio_prepare_write = vvp_io_prepare_write,
        .cio_commit_write = vvp_io_commit_write
};

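/*
 * Entry point of the VVP io layer: attach the vvp/ccc io slice to \a io,
 * record the total byte count and jobid for read/write, and refresh the
 * layout (under the layout lock) unless the io chooses to ignore layout
 * changes.
 */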
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
                struct cl_io *io)
{
        struct vvp_io *vio = vvp_env_io(env);
        struct ccc_io *cio = ccc_env_io(env);
        struct inode *inode = ccc_object_inode(obj);
        int result;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        CDEBUG(D_VFSTRACE, DFID
               " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
               PFID(lu_object_fid(&obj->co_lu)),
               io->ci_ignore_layout, io->ci_verify_layout,
               cio->cui_layout_gen, io->ci_restore_needed);

        CL_IO_SLICE_CLEAN(cio, cui_cl);
        cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
        vio->cui_ra_window_set = 0;
        result = 0;
        if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
                size_t count;
                struct ll_inode_info *lli = ll_i2info(inode);

                count = io->u.ci_rw.crw_count;
                /* "If nbyte is 0, read() will return 0 and have no other
                 * results." -- Single Unix Spec */
                if (count == 0)
                        result = 1;
                else
                        cio->cui_tot_count = count;

                /* for read/write, we store the jobid in the inode, and
                 * it'll be fetched by osc when building the RPC.
                 *
                 * it's not accurate if the file is shared by different
                 * jobs.
                 */
                lustre_get_jobid(lli->lli_jobid);
        } else if (io->ci_type == CIT_SETATTR) {
                if (!cl_io_is_trunc(io))
                        io->ci_lockreq = CILR_MANDATORY;
        }

        /* Ignore layout changes for generic CIT_MISC, but not for glimpse:
         * the io context for glimpse must set ci_verify_layout to true,
         * see cl_glimpse_size0() for details. */
        if (io->ci_type == CIT_MISC && !io->ci_verify_layout)
                io->ci_ignore_layout = 1;

        /* Enqueue the layout lock and get the layout version. We need to do
         * this even for operations requiring the file to be opened, such as
         * read and write, because the layout lock might not be granted in
         * IT_OPEN. */
        if (result == 0 && !io->ci_ignore_layout) {
                result = ll_layout_refresh(inode, &cio->cui_layout_gen);
                if (result == -ENOENT)
                        /* If the inode on the MDS has been removed, but the
                         * objects on the OSTs haven't been destroyed (async
                         * unlink), the layout fetch will return -ENOENT; we
                         * ignore this error and continue with the dirty
                         * flush. LU-3230. */
                        result = 0;
                if (result < 0)
                        CERROR("%s: refresh file layout " DFID " error %d.\n",
                               ll_get_fsname(inode->i_sb, NULL, 0),
                               PFID(lu_object_fid(&obj->co_lu)), result);
        }

        return result;
}

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
                                const struct cl_io_slice *slice)
{
        /* Called just for its assertions. */
        cl2ccc_io(env, slice);
        return vvp_env_io(env);
}