/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for VVP layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE


#include <obd.h>
#include <lustre_lite.h>

#include "vvp_internal.h"

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
				const struct cl_io_slice *slice);

/**
 * True if \a io is a normal io, false for sendfile()/splice_{read|write}.
 */
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
{
	struct vvp_io *vio = vvp_env_io(env);

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	return vio->cui_io_subtype == IO_NORMAL;
}

/**
 * For swapping layout. The file's layout may have changed.
 * To avoid populating pages to a wrong stripe, we have to verify the
 * correctness of the layout. It works because processes swapping the
 * layout have to acquire the group lock.
 */
static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
			       struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ccc_io *cio = ccc_env_io(env);
	bool rc = true;

	switch (io->ci_type) {
	case CIT_READ:
	case CIT_WRITE:
		/* no lock is needed to check lli_layout_gen here: we hold
		 * the extent lock, and the GROUP lock must be held to swap
		 * the layout */
		if (lli->lli_layout_gen != cio->cui_layout_gen) {
			io->ci_need_restart = 1;
			/* this results in a short read/write being returned
			 * to the application */
			io->ci_continue = 0;
			rc = false;
		}
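		/* intentional fall-through */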
	case CIT_FAULT:
		/* a fault is okay because we've already got a page. */
	default:
		break;
	}

	return rc;
}

/*****************************************************************************
 *
 * io operations.
 *
 */

static int vvp_io_fault_iter_init(const struct lu_env *env,
				  const struct cl_io_slice *ios)
{
	struct vvp_io *vio = cl2vvp_io(env, ios);
	struct inode *inode = ccc_object_inode(ios->cis_obj);

	LASSERT(inode ==
		cl2ccc_io(env, ios)->cui_fd->fd_file->f_dentry->d_inode);
	vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime);
	return 0;
}

static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = io->ci_obj;
	struct ccc_io *cio = cl2ccc_io(env, ios);

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, "ignore/verify layout %d/%d, layout version %d.\n",
	       io->ci_ignore_layout, io->ci_verify_layout, cio->cui_layout_gen);

	if (!io->ci_ignore_layout && io->ci_verify_layout) {
		__u32 gen = 0;

		/* check layout version */
		ll_layout_refresh(ccc_object_inode(obj), &gen);
		io->ci_need_restart = cio->cui_layout_gen != gen;
		if (io->ci_need_restart)
			CDEBUG(D_VFSTRACE, "layout changed from %d to %d.\n",
			       cio->cui_layout_gen, gen);
	}
}

static void vvp_io_fault_fini(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct cl_page *page = io->u.ci_fault.ft_page;

	CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));

	if (page != NULL) {
		lu_ref_del(&page->cp_reference, "fault", io);
		cl_page_put(env, page);
		io->u.ci_fault.ft_page = NULL;
	}
	vvp_io_fini(env, ios);
}

enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
{
	/*
	 * we only want to hold PW locks if the mmap() can generate
	 * writes back to the file, and that only happens in shared
	 * writable vmas
	 */
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
		return CLM_WRITE;
	return CLM_READ;
}

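/**
 * Walk the iovecs of a normal read/write and, for each part of the user
 * buffer that is itself mmapped from a (Lustre) file, enqueue a cl lock
 * matching the VMA, so that page faults on the buffer during the IO do
 * not need to take conflicting DLM locks.
 */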
static int vvp_mmap_locks(const struct lu_env *env,
			  struct ccc_io *vio, struct cl_io *io)
{
	struct ccc_thread_info *cti = ccc_env_info(env);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct cl_lock_descr *descr = &cti->cti_descr;
	ldlm_policy_data_t policy;
	unsigned long addr;
	unsigned long seg;
	ssize_t count;
	int result;
	ENTRY;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	if (!cl_is_normalio(env, io))
		RETURN(0);

	if (vio->cui_iov == NULL) /* nfs or loop back device write */
		RETURN(0);

	/* if there is no MM (e.g. NFS), there are no VMAs either */
	if (mm == NULL)
		RETURN(0);

	for (seg = 0; seg < vio->cui_nrsegs; seg++) {
		const struct iovec *iv = &vio->cui_iov[seg];

		addr = (unsigned long)iv->iov_base;
		count = iv->iov_len;
		if (count == 0)
			continue;

		count += addr & (~CFS_PAGE_MASK);
		addr &= CFS_PAGE_MASK;

		down_read(&mm->mmap_sem);
		while ((vma = our_vma(mm, addr, count)) != NULL) {
			struct inode *inode = vma->vm_file->f_dentry->d_inode;
			int flags = CEF_MUST;

			if (ll_file_nolock(vma->vm_file)) {
				/*
				 * For the no-lock case, a lockless lock will
				 * be generated.
				 */
				flags = CEF_NEVER;
			}

			/*
			 * XXX: Required lock mode can be weakened: CIT_WRITE
			 * io only ever reads user level buffer, and CIT_READ
			 * only writes on it.
			 */
			policy_from_vma(&policy, vma, addr, count);
			descr->cld_mode = vvp_mode_from_vma(vma);
			descr->cld_obj = ll_i2info(inode)->lli_clob;
			descr->cld_start = cl_index(descr->cld_obj,
						    policy.l_extent.start);
			descr->cld_end = cl_index(descr->cld_obj,
						  policy.l_extent.end);
			descr->cld_enq_flags = flags;
			result = cl_io_lock_alloc_add(env, io, descr);

			CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
			       descr->cld_mode, descr->cld_start,
			       descr->cld_end);

			/* drop mmap_sem before returning on error, or the
			 * read lock would be leaked */
			if (result < 0) {
				up_read(&mm->mmap_sem);
				RETURN(result);
			}

			if (vma->vm_end - addr >= count)
				break;

			count -= vma->vm_end - addr;
			addr = vma->vm_end;
		}
		up_read(&mm->mmap_sem);
	}
	RETURN(0);
}

static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
			  enum cl_lock_mode mode, loff_t start, loff_t end)
{
	struct ccc_io *cio = ccc_env_io(env);
	int result;
	int ast_flags = 0;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
	ENTRY;

	ccc_io_update_iov(env, cio, io);

	if (io->u.ci_rw.crw_nonblock)
		ast_flags |= CEF_NONBLOCK;
	result = vvp_mmap_locks(env, cio, io);
	if (result == 0)
		result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
	RETURN(result);
}

static int vvp_io_read_lock(const struct lu_env *env,
			    const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct ll_inode_info *lli = ll_i2info(ccc_object_inode(io->ci_obj));
	int result;

	ENTRY;
	/* XXX: Layer violation, we shouldn't see lsm at llite level. */
	if (lli->lli_has_smd) /* an lsm-less file needs no extent lock */
		result = vvp_io_rw_lock(env, io, CLM_READ,
					io->u.ci_rd.rd.crw_pos,
					io->u.ci_rd.rd.crw_pos +
					io->u.ci_rd.rd.crw_count - 1);
	else
		result = 0;
	RETURN(result);
}

static int vvp_io_fault_lock(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct vvp_io *vio = cl2vvp_io(env, ios);
	/*
	 * XXX LDLM_FL_CBPENDING
	 */
	return ccc_io_one_lock_index
		(env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma),
		 io->u.ci_fault.ft_index, io->u.ci_fault.ft_index);
}

static int vvp_io_write_lock(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	loff_t start;
	loff_t end;

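	/* an append write cannot know its final file position in advance,
	 * so it must lock the whole file range */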
	if (io->u.ci_wr.wr_append) {
		start = 0;
		end = OBD_OBJECT_EOF;
	} else {
		start = io->u.ci_wr.wr.crw_pos;
		end = start + io->u.ci_wr.wr.crw_count - 1;
	}
	return vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
}

static int vvp_io_setattr_iter_init(const struct lu_env *env,
				    const struct cl_io_slice *ios)
{
	return 0;
}

/**
 * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
 *
 * Handles the "lockless io" mode when extent locking is done by the server.
 */
static int vvp_io_setattr_lock(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct ccc_io *cio = ccc_env_io(env);
	struct cl_io *io = ios->cis_io;
	__u64 new_size;
	__u32 enqflags = 0;

	if (cl_io_is_trunc(io)) {
		new_size = io->u.ci_setattr.sa_attr.lvb_size;
		if (new_size == 0)
			enqflags = CEF_DISCARD_DATA;
	} else {
		if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
		     io->u.ci_setattr.sa_attr.lvb_ctime) ||
		    (io->u.ci_setattr.sa_attr.lvb_atime >=
		     io->u.ci_setattr.sa_attr.lvb_ctime))
			return 0;
		new_size = 0;
	}
	cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
	return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
			       new_size, OBD_OBJECT_EOF);
}

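/**
 * Set the new size on the inode under ll_inode_size_lock(), validating
 * it with inode_newsize_ok() first; truncate_setsize() then trims the
 * page cache to match.
 */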
static int vvp_do_vmtruncate(struct inode *inode, size_t size)
{
	int result;
	/*
	 * Only ll_inode_size_lock is taken at this level.
	 */
	ll_inode_size_lock(inode);
	result = inode_newsize_ok(inode, size);
	if (result < 0) {
		ll_inode_size_unlock(inode);
		return result;
	}
	truncate_setsize(inode, size);
	ll_inode_size_unlock(inode);
	return result;
}

static int vvp_io_setattr_trunc(const struct lu_env *env,
				const struct cl_io_slice *ios,
				struct inode *inode, loff_t size)
{
	inode_dio_wait(inode);
	return 0;
}

static int vvp_io_setattr_time(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = io->ci_obj;
	struct cl_attr *attr = ccc_env_thread_attr(env);
	int result;
	unsigned valid = CAT_CTIME;

	cl_object_attr_lock(obj);
	attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
	if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) {
		attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
		valid |= CAT_ATIME;
	}
	if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) {
		attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
		valid |= CAT_MTIME;
	}
	result = cl_object_attr_set(env, obj, attr, valid);
	cl_object_attr_unlock(obj);

	return result;
}

static int vvp_io_setattr_start(const struct lu_env *env,
				const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct inode *inode = ccc_object_inode(io->ci_obj);

	mutex_lock(&inode->i_mutex);
	if (cl_io_is_trunc(io))
		return vvp_io_setattr_trunc(env, ios, inode,
					    io->u.ci_setattr.sa_attr.lvb_size);
	else
		return vvp_io_setattr_time(env, ios);
}

static void vvp_io_setattr_end(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	struct inode *inode = ccc_object_inode(io->ci_obj);

	if (cl_io_is_trunc(io)) {
		/* Truncate in-memory pages; they must be clean, because osc
		 * has already been notified to destroy the osc_extents. */
		vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
		inode_dio_write_done(inode);
	}
	mutex_unlock(&inode->i_mutex);
}

static void vvp_io_setattr_fini(const struct lu_env *env,
				const struct cl_io_slice *ios)
{
	vvp_io_fini(env, ios);
}

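/*
 * Thin wrappers feeding the iovec state cached in ccc_io into the
 * generic VFS aio read/write paths.
 */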
static ssize_t lustre_generic_file_read(struct file *file,
					struct ccc_io *vio, loff_t *ppos)
{
	return generic_file_aio_read(vio->cui_iocb, vio->cui_iov,
				     vio->cui_nrsegs, *ppos);
}

static ssize_t lustre_generic_file_write(struct file *file,
					 struct ccc_io *vio, loff_t *ppos)
{
	return generic_file_aio_write(vio->cui_iocb, vio->cui_iov,
				      vio->cui_nrsegs, *ppos);
}

static int vvp_io_read_start(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct vvp_io *vio = cl2vvp_io(env, ios);
	struct ccc_io *cio = cl2ccc_io(env, ios);
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = io->ci_obj;
	struct inode *inode = ccc_object_inode(obj);
	struct ll_ra_read *bead = &vio->cui_bead;
	struct file *file = cio->cui_fd->fd_file;

	int result;
	loff_t pos = io->u.ci_rd.rd.crw_pos;
	long cnt = io->u.ci_rd.rd.crw_count;
	long tot = cio->cui_tot_count;
	int exceed = 0;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);

	if (!can_populate_pages(env, io, inode))
		return 0;

	result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
	if (result != 0)
		return result;
	else if (exceed != 0)
		goto out;

	LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
			 "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
			 inode->i_ino, cnt, pos, i_size_read(inode));

	/* turn off the kernel's read-ahead */
	cio->cui_fd->fd_file->f_ra.ra_pages = 0;

	/* initialize read-ahead window once per syscall */
	if (!vio->cui_ra_window_set) {
		vio->cui_ra_window_set = 1;
		bead->lrr_start = cl_index(obj, pos);
		/*
		 * XXX: explicit PAGE_CACHE_SIZE
		 */
		bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
		ll_ra_read_in(file, bead);
	}

	/* BUG: 5972 */
	file_accessed(file);
	switch (vio->cui_io_subtype) {
	case IO_NORMAL:
		result = lustre_generic_file_read(file, cio, &pos);
		break;
	case IO_SPLICE:
		result = generic_file_splice_read(file, &pos,
						  vio->u.splice.cui_pipe, cnt,
						  vio->u.splice.cui_flags);
		/* LU-1109: do splice read stripe by stripe, otherwise it may
		 * make nfsd stuck if this read occupies all of the internal
		 * pipe buffers. */
		io->ci_continue = 0;
		break;
	default:
		CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
		LBUG();
	}

out:
	if (result >= 0) {
		if (result < cnt)
			io->ci_continue = 0;
		io->ci_nob += result;
		ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
				  cio->cui_fd, pos, result, 0);
		result = 0;
	}
	return result;
}

static void vvp_io_read_fini(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct vvp_io *vio = cl2vvp_io(env, ios);
	struct ccc_io *cio = cl2ccc_io(env, ios);

	if (vio->cui_ra_window_set)
		ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);

	vvp_io_fini(env, ios);
}

static int vvp_io_write_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct ccc_io *cio = cl2ccc_io(env, ios);
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = io->ci_obj;
	struct inode *inode = ccc_object_inode(obj);
	struct file *file = cio->cui_fd->fd_file;
	ssize_t result = 0;
	loff_t pos = io->u.ci_wr.wr.crw_pos;
	size_t cnt = io->u.ci_wr.wr.crw_count;

	ENTRY;

	if (!can_populate_pages(env, io, inode))
		return 0;

	if (cl_io_is_append(io)) {
		/*
		 * PARALLEL IO: this has to be changed for parallel IO doing
		 * out-of-order writes.
		 */
		pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
		cio->cui_iocb->ki_pos = pos;
	}

	CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);

	if (cio->cui_iov == NULL) /* from a temp io in ll_cl_init() */
		result = 0;
	else
		result = lustre_generic_file_write(file, cio, &pos);

	if (result > 0) {
		if (result < cnt)
			io->ci_continue = 0;
		io->ci_nob += result;
		ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
				  cio->cui_fd, pos, result, 0);
		result = 0;
	}
	RETURN(result);
}

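/**
 * Call the kernel's filemap_fault() on the faulting VMA and translate
 * the VM_FAULT_* result into an errno; on success the target page is
 * left locked in cfio->ft_vmpage.
 */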
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
	struct vm_fault *vmf = cfio->fault.ft_vmf;

	cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);

	if (vmf->page) {
		LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
			       vmf->virtual_address);
		if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
			lock_page(vmf->page);
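			/* we had to lock the page ourselves, so record
			 * that fact in ft_flags */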
			cfio->fault.ft_flags |= VM_FAULT_LOCKED;
		}

		cfio->ft_vmpage = vmf->page;
		return 0;
	}

	if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
		CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
		return -EFAULT;
	}

	if (cfio->fault.ft_flags & VM_FAULT_OOM) {
		CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
		return -ENOMEM;
	}

	if (cfio->fault.ft_flags & VM_FAULT_RETRY)
		return -EAGAIN;

	CERROR("unknown error in page fault %d!\n", cfio->fault.ft_flags);
	return -EINVAL;
}

static int vvp_io_fault_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct vvp_io *vio = cl2vvp_io(env, ios);
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = io->ci_obj;
	struct inode *inode = ccc_object_inode(obj);
	struct cl_fault_io *fio = &io->u.ci_fault;
	struct vvp_fault_io *cfio = &vio->u.fault;
	loff_t offset;
	int result = 0;
	struct page *vmpage = NULL;
	struct cl_page *page;
	loff_t size;
	pgoff_t last; /* last page in a file data region */

	if (fio->ft_executable &&
	    LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
		CWARN("binary "DFID
		      " changed while waiting for the page fault lock\n",
		      PFID(lu_object_fid(&obj->co_lu)));

	/* offset of the last byte on the page */
	offset = cl_offset(obj, fio->ft_index + 1) - 1;
	LASSERT(cl_index(obj, offset) == fio->ft_index);
	result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
	if (result != 0)
		return result;

	/* must return locked page */
	if (fio->ft_mkwrite) {
		LASSERT(cfio->ft_vmpage != NULL);
		lock_page(cfio->ft_vmpage);
	} else {
		result = vvp_io_kernel_fault(cfio);
		if (result != 0)
			return result;
	}

	vmpage = cfio->ft_vmpage;
	LASSERT(PageLocked(vmpage));

	if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
		ll_invalidate_page(vmpage);

	size = i_size_read(inode);
	/* Though we have already held a cl_lock upon this page, it can
	 * still be truncated locally. */
	if (unlikely((vmpage->mapping != inode->i_mapping) ||
		     (page_offset(vmpage) > size))) {
		CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");

		/* return +1 to stop cl_io_loop() and ll_fault() will catch
		 * and retry. */
		GOTO(out, result = +1);
	}

	if (fio->ft_mkwrite) {
		pgoff_t last_index;
		/*
		 * Capture the size while holding the lli_trunc_sem from
		 * above; we want to make sure that we complete the mkwrite
		 * action while holding this lock. We need to make sure that
		 * we are not past the end of the file.
		 */
		last_index = cl_index(obj, size - 1);
		if (last_index < fio->ft_index) {
			CDEBUG(D_PAGE,
			       "llite: mkwrite and truncate race happened: "
			       "%p: 0x%lx 0x%lx\n",
			       vmpage->mapping, fio->ft_index, last_index);
			/*
			 * We need to return if we are past the end of the
			 * file. This will propagate up the call stack to
			 * ll_page_mkwrite, where we will return
			 * VM_FAULT_NOPAGE. Any non-negative value returned
			 * here will be silently converted to 0. If
			 * vmpage->mapping is NULL, the error code would be
			 * converted back to ENODATA in ll_page_mkwrite0, so
			 * we return -ENODATA to handle both cases.
			 */
			GOTO(out, result = -ENODATA);
		}
	}

	page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
	if (IS_ERR(page))
		GOTO(out, result = PTR_ERR(page));

	/* if the page is going to be written, we should add it into cache
	 * earlier. */
	if (fio->ft_mkwrite) {
		wait_on_page_writeback(vmpage);
		if (set_page_dirty(vmpage)) {
			struct ccc_page *cp;

			/* vvp_page_assume() calls wait_on_page_writeback(). */
			cl_page_assume(env, io, page);

			cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
			vvp_write_pending(cl2ccc(obj), cp);

			/* Do not set the Dirty bit here, so that if IO is
			 * started before the page is really made dirty, we
			 * still have a chance to detect it. */
			result = cl_page_cache_add(env, io, page, CRT_WRITE);
			LASSERT(cl_page_is_owned(page, io));

			vmpage = NULL;
			if (result < 0) {
				cl_page_unmap(env, io, page);
				cl_page_discard(env, io, page);
				cl_page_disown(env, io, page);

				cl_page_put(env, page);

				/* we're in big trouble, what can we do now? */
				if (result == -EDQUOT)
					result = -ENOSPC;
				GOTO(out, result);
			} else
				cl_page_disown(env, io, page);
		}
	}

	last = cl_index(obj, size - 1);
	/*
	 * The ft_index is only used in the case of a mkwrite action. We need
	 * to check that our assertions are correct, since we should have
	 * caught this above.
	 */
	LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
	if (fio->ft_index == last)
		/*
		 * Last page is mapped partially.
		 */
		fio->ft_nob = size - cl_offset(obj, fio->ft_index);
	else
		fio->ft_nob = cl_page_size(obj);

	lu_ref_add(&page->cp_reference, "fault", io);
	fio->ft_page = page;
	EXIT;

out:
	/* return an unlocked vmpage to avoid deadlocking */
	if (vmpage != NULL)
		unlock_page(vmpage);
	cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
	return result;
}

static int vvp_io_fsync_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	/* we should mark the TOWRITE bit on each dirty page in the radix
	 * tree to verify that pages have been written out, but this is
	 * difficult because of races. */
	return 0;
}

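/**
 * Per-page read hook: update the per-file readahead state, queue the
 * page, and kick off readahead for the pages around it.
 */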
static int vvp_io_read_page(const struct lu_env *env,
			    const struct cl_io_slice *ios,
			    const struct cl_page_slice *slice)
{
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = slice->cpl_obj;
	struct ccc_page *cp = cl2ccc_page(slice);
	struct cl_page *page = slice->cpl_page;
	struct inode *inode = ccc_object_inode(obj);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ll_file_data *fd = cl2ccc_io(env, ios)->cui_fd;
	struct ll_readahead_state *ras = &fd->fd_ras;
	struct page *vmpage = cp->cpg_page;
	struct cl_2queue *queue = &io->ci_queue;
	int rc;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));
	LASSERT(slice->cpl_obj == obj);

	ENTRY;

	if (sbi->ll_ra_info.ra_max_pages_per_file &&
	    sbi->ll_ra_info.ra_max_pages)
		ras_update(sbi, inode, ras, page->cp_index,
			   cp->cpg_defer_uptodate);

	/* Sanity check whether the page is protected by a lock. */
	rc = cl_page_is_under_lock(env, io, page);
	if (rc != -EBUSY) {
		CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
			       rc == -ENODATA ? "without a lock" :
			       "match failed", rc);
		if (rc != -ENODATA)
			RETURN(rc);
	}

	if (cp->cpg_defer_uptodate) {
		cp->cpg_ra_used = 1;
		cl_page_export(env, page, 1);
	}
	/*
	 * Add the page into the queue even when it is marked uptodate above.
	 * This will unlock it automatically as part of cl_page_list_disown().
	 */
	cl_2queue_add(queue, page);
	if (sbi->ll_ra_info.ra_max_pages_per_file &&
	    sbi->ll_ra_info.ra_max_pages)
		ll_readahead(env, io, ras,
			     vmpage->mapping, &queue->c2_qin, fd->fd_flags);

	RETURN(0);
}

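/**
 * Submit a single page for synchronous IO of type \a crt and wait for
 * it to complete.
 */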
static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
			    struct cl_page *page, struct ccc_page *cp,
			    enum cl_req_type crt)
{
	struct cl_2queue *queue;
	int result;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	queue = &io->ci_queue;
	cl_2queue_init_page(queue, page);

	result = cl_io_submit_sync(env, io, crt, queue, 0);
	LASSERT(cl_page_is_owned(page, io));

	if (crt == CRT_READ)
		/*
		 * in the CRT_WRITE case the page is left locked even on
		 * error.
		 */
		cl_page_list_disown(env, io, &queue->c2_qin);
	cl_2queue_fini(env, queue);

	return result;
}

/**
 * Prepare a partially written-to page for a write.
 */
static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
				  struct cl_object *obj, struct cl_page *pg,
				  struct ccc_page *cp,
				  unsigned from, unsigned to)
{
	struct cl_attr *attr = ccc_env_thread_attr(env);
	loff_t offset = cl_offset(obj, pg->cp_index);
	int result;

	cl_object_attr_lock(obj);
	result = cl_object_attr_get(env, obj, attr);
	cl_object_attr_unlock(obj);
	if (result == 0) {
		/*
		 * If we are writing to a new page, there is no need to read
		 * old data. The extent locking will have updated the KMS,
		 * and for our purposes here we can treat it like i_size.
		 */
		if (attr->cat_kms <= offset) {
			char *kaddr = kmap_atomic(cp->cpg_page);

			memset(kaddr, 0, cl_page_size(obj));
			kunmap_atomic(kaddr);
		} else if (cp->cpg_defer_uptodate)
			cp->cpg_ra_used = 1;
		else
			result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
		/*
		 * In older implementations, obdo_refresh_inode was called
		 * here to update the inode because the write might modify
		 * the object info at the OST. However, this has been proven
		 * useless, since LVB functions will be called when user
		 * space programs try to retrieve inode attributes. Also see
		 * bug 15909 for details. -jay
		 */
		if (result == 0)
			cl_page_export(env, pg, 1);
	}
	return result;
}

static int vvp_io_prepare_write(const struct lu_env *env,
				const struct cl_io_slice *ios,
				const struct cl_page_slice *slice,
				unsigned from, unsigned to)
{
	struct cl_object *obj = slice->cpl_obj;
	struct ccc_page *cp = cl2ccc_page(slice);
	struct cl_page *pg = slice->cpl_page;
	struct page *vmpage = cp->cpg_page;

	int result;

	ENTRY;

	LINVRNT(cl_page_is_vmlocked(env, pg));
	LASSERT(vmpage->mapping->host == ccc_object_inode(obj));

	result = 0;

	CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
	if (!PageUptodate(vmpage)) {
		/*
		 * We're completely overwriting an existing page, so _don't_
		 * set it up to date until commit_write
		 */
		if (from == 0 && to == PAGE_CACHE_SIZE) {
			CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
			POISON_PAGE(page, 0x11);
		} else
			result = vvp_io_prepare_partial(env, ios->cis_io, obj,
							pg, cp, from, to);
	} else
		CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
	RETURN(result);
}

static int vvp_io_commit_write(const struct lu_env *env,
			       const struct cl_io_slice *ios,
			       const struct cl_page_slice *slice,
			       unsigned from, unsigned to)
{
	struct cl_object *obj = slice->cpl_obj;
	struct cl_io *io = ios->cis_io;
	struct ccc_page *cp = cl2ccc_page(slice);
	struct cl_page *pg = slice->cpl_page;
	struct inode *inode = ccc_object_inode(obj);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ll_inode_info *lli = ll_i2info(inode);
	struct page *vmpage = cp->cpg_page;

	int result;
	int tallyop;
	loff_t size;

	ENTRY;

	LINVRNT(cl_page_is_vmlocked(env, pg));
	LASSERT(vmpage->mapping->host == inode);

	LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
	CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);

	/*
	 * queue a write for some time in the future the first time we
	 * dirty the page.
	 *
	 * This is different from what other file systems do: they usually
	 * just mark the page (and some of its buffers) dirty and rely on
	 * balance_dirty_pages() to start a write-back. Lustre wants
	 * write-back to be started earlier for the following reasons:
	 *
	 * (1) with a large number of clients we need to limit the amount
	 * of cached data on the clients a lot;
	 *
	 * (2) large compute jobs generally want compute-only then io-only
	 * and the IO should complete as quickly as possible;
	 *
	 * (3) IO is batched up to the RPC size and is async until the
	 * client max cache is hit
	 * (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
	 *
	 */
	if (!PageDirty(vmpage)) {
		tallyop = LPROC_LL_DIRTY_MISSES;
		result = cl_page_cache_add(env, io, pg, CRT_WRITE);
		if (result == 0) {
			/* page was added into cache successfully. */
			set_page_dirty(vmpage);
			vvp_write_pending(cl2ccc(obj), cp);
		} else if (result == -EDQUOT) {
			pgoff_t last_index = i_size_read(inode) >>
					     PAGE_CACHE_SHIFT;
			bool need_clip = true;

			/*
			 * Client ran out of disk space grant. Possible
			 * strategies are:
			 *
			 * (a) do a sync write, renewing grant;
			 *
			 * (b) stop writing on this stripe, switch to the
			 * next one.
			 *
			 * (b) is a part of "parallel io" design that is the
			 * ultimate goal. (a) is what "old" client did, and
			 * what the new code continues to do for the time
			 * being.
			 */
			if (last_index > pg->cp_index) {
				to = PAGE_CACHE_SIZE;
				need_clip = false;
			} else if (last_index == pg->cp_index) {
				int size_to = i_size_read(inode) &
					      ~CFS_PAGE_MASK;
				if (to < size_to)
					to = size_to;
			}
			if (need_clip)
				cl_page_clip(env, pg, 0, to);
			result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
			if (result)
				CERROR("Write page %lu of inode %p failed %d\n",
				       pg->cp_index, inode, result);
		}
	} else {
		tallyop = LPROC_LL_DIRTY_HITS;
		result = 0;
	}
	ll_stats_ops_tally(sbi, tallyop, 1);

	/* The inode should be marked DIRTY even if no new page was marked
	 * DIRTY, because the page could have gone unflushed between two
	 * modifications. It is important that the file is marked DIRTY as
	 * soon as the I/O is done: when the cache is eventually flushed,
	 * the file could already be closed and it would be too late to
	 * warn the MDT. It is acceptable for the file to be marked DIRTY
	 * even if the I/O is dropped for some reason before being flushed
	 * to the OST.
	 */
	if (result == 0) {
		spin_lock(&lli->lli_lock);
		lli->lli_flags |= LLIF_DATA_MODIFIED;
		spin_unlock(&lli->lli_lock);
	}

	size = cl_offset(obj, pg->cp_index) + to;

	ll_inode_size_lock(inode);
	if (result == 0) {
		if (size > i_size_read(inode)) {
			cl_isize_write_nolock(inode, size);
			CDEBUG(D_VFSTRACE, DFID" updating i_size %lu\n",
			       PFID(lu_object_fid(&obj->co_lu)),
			       (unsigned long)size);
		}
		cl_page_export(env, pg, 1);
	} else {
		if (size > i_size_read(inode))
			cl_page_discard(env, io, pg);
	}
	ll_inode_size_unlock(inode);
	RETURN(result);
}

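/*
 * Per-io-type operation vectors for the VVP layer; methods left unset
 * for a given io type are simply not called by the cl_io framework.
 */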
static const struct cl_io_operations vvp_io_ops = {
	.op = {
		[CIT_READ] = {
			.cio_fini      = vvp_io_read_fini,
			.cio_lock      = vvp_io_read_lock,
			.cio_start     = vvp_io_read_start,
			.cio_advance   = ccc_io_advance
		},
		[CIT_WRITE] = {
			.cio_fini      = vvp_io_fini,
			.cio_lock      = vvp_io_write_lock,
			.cio_start     = vvp_io_write_start,
			.cio_advance   = ccc_io_advance
		},
		[CIT_SETATTR] = {
			.cio_fini      = vvp_io_setattr_fini,
			.cio_iter_init = vvp_io_setattr_iter_init,
			.cio_lock      = vvp_io_setattr_lock,
			.cio_start     = vvp_io_setattr_start,
			.cio_end       = vvp_io_setattr_end
		},
		[CIT_FAULT] = {
			.cio_fini      = vvp_io_fault_fini,
			.cio_iter_init = vvp_io_fault_iter_init,
			.cio_lock      = vvp_io_fault_lock,
			.cio_start     = vvp_io_fault_start,
			.cio_end       = ccc_io_end
		},
		[CIT_FSYNC] = {
			.cio_start     = vvp_io_fsync_start,
			.cio_fini      = vvp_io_fini
		},
		[CIT_MISC] = {
			.cio_fini      = vvp_io_fini
		}
	},
	.cio_read_page     = vvp_io_read_page,
	.cio_prepare_write = vvp_io_prepare_write,
	.cio_commit_write  = vvp_io_commit_write
};

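/**
 * Attach the VVP io slice to \a io, set up the per-thread io state and,
 * unless the layout is to be ignored, refresh the layout lock/version.
 */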
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
		struct cl_io *io)
{
	struct vvp_io *vio = vvp_env_io(env);
	struct ccc_io *cio = ccc_env_io(env);
	struct inode *inode = ccc_object_inode(obj);
	int result;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));
	ENTRY;

	CL_IO_SLICE_CLEAN(cio, cui_cl);
	cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
	vio->cui_ra_window_set = 0;
	result = 0;
	if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
		size_t count;
		struct ll_inode_info *lli = ll_i2info(inode);

		count = io->u.ci_rw.crw_count;
		/* "If nbyte is 0, read() will return 0 and have no other
		 * results." -- Single Unix Spec */
		if (count == 0)
			result = 1;
		else {
			cio->cui_tot_count = count;
			cio->cui_tot_nrsegs = 0;
		}
		/* for read/write, we store the jobid in the inode, and
		 * it'll be fetched by osc when building the RPC.
		 *
		 * it's not accurate if the file is shared by different
		 * jobs.
		 */
		lustre_get_jobid(lli->lli_jobid);
	} else if (io->ci_type == CIT_SETATTR) {
		if (!cl_io_is_trunc(io))
			io->ci_lockreq = CILR_MANDATORY;
	}

	/* Ignore layout change for generic CIT_MISC but not for glimpse.
	 * The io context for glimpse must set ci_verify_layout to true;
	 * see cl_glimpse_size0() for details. */
	if (io->ci_type == CIT_MISC && !io->ci_verify_layout)
		io->ci_ignore_layout = 1;

	/* Enqueue the layout lock and get the layout version. We need to
	 * do this even for operations requiring an open file, such as read
	 * and write, because the layout lock might not be granted in
	 * IT_OPEN. */
	if (result == 0 && !io->ci_ignore_layout) {
		result = ll_layout_refresh(inode, &cio->cui_layout_gen);
		if (result == -ENOENT)
			/* If the inode on the MDS has been removed, but the
			 * objects on the OSTs haven't been destroyed (async
			 * unlink), layout fetch will return -ENOENT; we'd
			 * ignore this error and continue with the dirty
			 * flush. LU-3230. */
			result = 0;
		if (result < 0)
			CERROR("%s: refresh file layout " DFID " error %d.\n",
			       ll_get_fsname(inode->i_sb, NULL, 0),
			       PFID(lu_object_fid(&obj->co_lu)), result);
	}

	RETURN(result);
}

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
				const struct cl_io_slice *slice)
{
	/* calling cl2ccc_io() just for its assertions */
	cl2ccc_io(env, slice);
	return vvp_env_io(env);
}