/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client IO.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <linux/list.h>
#include <cl_object.h>
#include "cl_internal.h"

/*****************************************************************************
 *
 * cl_io interface.
 *
 */

#define cl_io_for_each(slice, io) \
        list_for_each_entry((slice), &io->ci_layers, cis_linkage)
#define cl_io_for_each_reverse(slice, io) \
        list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)

static inline int cl_io_type_is_valid(enum cl_io_type type)
{
        return CIT_READ <= type && type < CIT_OP_NR;
}

static inline int cl_io_is_loopable(const struct cl_io *io)
{
        return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
}

/**
 * Returns true iff there is an IO ongoing in the given environment.
 */
int cl_io_is_going(const struct lu_env *env)
{
        return cl_env_info(env)->clt_current_io != NULL;
}
EXPORT_SYMBOL(cl_io_is_going);

/**
 * cl_io invariant that holds at all times when exported cl_io_*() functions
 * are entered and left.
 */
static int cl_io_invariant(const struct cl_io *io)
{
        struct cl_io *up;

        up = io->ci_parent;
        return
                /*
                 * io can own pages only when it is ongoing. Sub-io might
                 * still be in CIS_LOCKED state when top-io is in
                 * CIS_IO_GOING.
                 */
                ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
                     (io->ci_state == CIS_LOCKED && up != NULL));
}

/**
 * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
 */
void cl_io_fini(const struct lu_env *env, struct cl_io *io)
{
        struct cl_io_slice    *slice;
        struct cl_thread_info *info;

        LINVRNT(cl_io_type_is_valid(io->ci_type));
        LINVRNT(cl_io_invariant(io));
        ENTRY;

        while (!list_empty(&io->ci_layers)) {
                slice = container_of(io->ci_layers.prev, struct cl_io_slice,
                                     cis_linkage);
                list_del_init(&slice->cis_linkage);
                if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
                        slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
                /*
                 * Invalidate slice to catch use after free. This assumes that
                 * slices are allocated within session and can be touched
                 * after ->cio_fini() returns.
                 */
                slice->cis_io = NULL;
        }
        io->ci_state = CIS_FINI;
        info = cl_env_info(env);
        if (info->clt_current_io == io)
                info->clt_current_io = NULL;

        /* sanity check for layout change */
        switch (io->ci_type) {
        case CIT_READ:
        case CIT_WRITE:
                break;
        case CIT_FAULT:
        case CIT_FSYNC:
                LASSERT(!io->ci_need_restart);
                break;
        case CIT_SETATTR:
        case CIT_MISC:
                /* Check ignore layout change conf */
                LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
                             !io->ci_need_restart));
                break;
        default:
                LBUG();
        }
        EXIT;
}
EXPORT_SYMBOL(cl_io_fini);

static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
                       enum cl_io_type iot, struct cl_object *obj)
{
        struct cl_object *scan;
        int result;

        LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
        LINVRNT(cl_io_type_is_valid(iot));
        LINVRNT(cl_io_invariant(io));
        ENTRY;

        io->ci_type = iot;
        INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
        INIT_LIST_HEAD(&io->ci_lockset.cls_curr);
        INIT_LIST_HEAD(&io->ci_lockset.cls_done);
        INIT_LIST_HEAD(&io->ci_layers);

        result = 0;
        cl_object_for_each(scan, obj) {
                if (scan->co_ops->coo_io_init != NULL) {
                        result = scan->co_ops->coo_io_init(env, scan, io);
                        if (result != 0)
                                break;
                }
        }
        if (result == 0)
                io->ci_state = CIS_INIT;
        RETURN(result);
}

/**
 * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
 *
 * \pre obj != cl_object_top(obj)
 */
int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
                   enum cl_io_type iot, struct cl_object *obj)
{
        struct cl_thread_info *info = cl_env_info(env);

        LASSERT(obj != cl_object_top(obj));
        if (info->clt_current_io == NULL)
                info->clt_current_io = io;
        return cl_io_init0(env, io, iot, obj);
}
EXPORT_SYMBOL(cl_io_sub_init);

/**
 * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
 *
 * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
 * what the latter returned.
 *
 * \pre obj == cl_object_top(obj)
 * \pre cl_io_type_is_valid(iot)
 * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
 */
int cl_io_init(const struct lu_env *env, struct cl_io *io,
               enum cl_io_type iot, struct cl_object *obj)
{
        struct cl_thread_info *info = cl_env_info(env);

        LASSERT(obj == cl_object_top(obj));
        LASSERT(info->clt_current_io == NULL);

        info->clt_current_io = io;
        return cl_io_init0(env, io, iot, obj);
}
EXPORT_SYMBOL(cl_io_init);

/**
 * Initialize read or write io.
 *
 * \pre iot == CIT_READ || iot == CIT_WRITE
 */
int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
                  enum cl_io_type iot, loff_t pos, size_t count)
{
        LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
        LINVRNT(io->ci_obj != NULL);
        ENTRY;

        LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
                         "io range: %u ["LPU64", "LPU64") %u %u\n",
                         iot, (__u64)pos, (__u64)pos + count,
                         io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
        io->u.ci_rw.crw_pos   = pos;
        io->u.ci_rw.crw_count = count;
        RETURN(cl_io_init(env, io, iot, io->ci_obj));
}
EXPORT_SYMBOL(cl_io_rw_init);

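/*
 * Example: the typical life-cycle of a read or write io built on the
 * functions above (illustrative sketch only; "env", "io", "pos" and
 * "count" are assumed to be set up by the caller, as llite does from its
 * file read/write paths). Note that cl_io_fini() is called no matter what
 * cl_io_rw_init() returned:
 *
 *	result = cl_io_rw_init(env, io, CIT_READ, pos, count);
 *	if (result == 0)
 *		result = cl_io_loop(env, io);
 *	cl_io_fini(env, io);
 */
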
static inline const struct lu_fid *
cl_lock_descr_fid(const struct cl_lock_descr *descr)
{
        return lu_object_fid(&descr->cld_obj->co_lu);
}

static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
                              const struct cl_lock_descr *d1)
{
        return lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1)) ?:
                __diff_normalize(d0->cld_start, d1->cld_start);
}

static int cl_lock_descr_cmp(const struct cl_lock_descr *d0,
                             const struct cl_lock_descr *d1)
{
        int ret;

        ret = lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1));
        if (ret != 0)
                return ret;
        if (d0->cld_end < d1->cld_start)
                return -1;
        /* d0 lies entirely after d1 */
        if (d0->cld_start > d1->cld_end)
                return +1;
        return 0;
}

static void cl_lock_descr_merge(struct cl_lock_descr *d0,
                                const struct cl_lock_descr *d1)
{
        d0->cld_start = min(d0->cld_start, d1->cld_start);
        d0->cld_end   = max(d0->cld_end, d1->cld_end);

        if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
                d0->cld_mode = CLM_WRITE;

        if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
                d0->cld_mode = CLM_GROUP;
}

/**
 * Sort locks in lexicographical order of their (fid, start-offset) pairs.
 */
static void cl_io_locks_sort(struct cl_io *io)
{
        int done = 0;

        ENTRY;
        /* hidden treasure: bubble sort for now. */
        do {
                struct cl_io_lock_link *curr;
                struct cl_io_lock_link *prev;
                struct cl_io_lock_link *temp;

                done = 1;
                prev = NULL;

                list_for_each_entry_safe(curr, temp,
                                         &io->ci_lockset.cls_todo,
                                         cill_linkage) {
                        if (prev != NULL) {
                                switch (cl_lock_descr_sort(&prev->cill_descr,
                                                           &curr->cill_descr)) {
                                case 0:
                                        /*
                                         * IMPOSSIBLE: Identical locks are
                                         *             already removed at
                                         *             this point.
                                         */
                                default:
                                        LBUG();
                                case +1:
                                        list_move_tail(&curr->cill_linkage,
                                                       &prev->cill_linkage);
                                        done = 0;
                                        continue; /* don't change prev: it's
                                                   * still "previous" */
                                case -1: /* already in order */
                                        break;
                                }
                        }
                        prev = curr;
                }
        } while (!done);
        EXIT;
}

/**
 * Check whether \a queue contains locks matching \a need.
 *
 * \retval +ve there is a matching lock in the \a queue
 * \retval   0 there are no matching locks in the \a queue
 */
int cl_queue_match(const struct list_head *queue,
                   const struct cl_lock_descr *need)
{
        struct cl_io_lock_link *scan;

        ENTRY;
        list_for_each_entry(scan, queue, cill_linkage) {
                if (cl_lock_descr_match(&scan->cill_descr, need))
                        RETURN(+1);
        }
        RETURN(0);
}
EXPORT_SYMBOL(cl_queue_match);

static int cl_queue_merge(const struct list_head *queue,
                          const struct cl_lock_descr *need)
{
        struct cl_io_lock_link *scan;

        ENTRY;
        list_for_each_entry(scan, queue, cill_linkage) {
                if (cl_lock_descr_cmp(&scan->cill_descr, need))
                        continue;
                cl_lock_descr_merge(&scan->cill_descr, need);
                CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
                       scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
                       scan->cill_descr.cld_end);
                RETURN(+1);
        }
        RETURN(0);
}

static int cl_lockset_match(const struct cl_lockset *set,
                            const struct cl_lock_descr *need)
{
        return cl_queue_match(&set->cls_curr, need) ||
               cl_queue_match(&set->cls_done, need);
}

static int cl_lockset_merge(const struct cl_lockset *set,
                            const struct cl_lock_descr *need)
{
        return cl_queue_merge(&set->cls_todo, need) ||
               cl_lockset_match(set, need);
}

static int cl_lockset_lock_one(const struct lu_env *env,
                               struct cl_io *io, struct cl_lockset *set,
                               struct cl_io_lock_link *link)
{
        struct cl_lock *lock;
        int result;

        ENTRY;

        lock = cl_lock_request(env, io, &link->cill_descr, "io", io);

        if (!IS_ERR(lock)) {
                link->cill_lock = lock;
                list_move(&link->cill_linkage, &set->cls_curr);
                if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
                        result = cl_wait(env, lock);
                        if (result == 0)
                                list_move(&link->cill_linkage,
                                          &set->cls_done);
                } else
                        result = 0;
        } else
                result = PTR_ERR(lock);
        RETURN(result);
}

static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
                              struct cl_io_lock_link *link)
{
        struct cl_lock *lock = link->cill_lock;

        ENTRY;
        list_del_init(&link->cill_linkage);
        if (lock != NULL) {
                cl_lock_release(env, lock, "io", io);
                link->cill_lock = NULL;
        }
        if (link->cill_fini != NULL)
                link->cill_fini(env, link);
        EXIT;
}

static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
                           struct cl_lockset *set)
{
        struct cl_io_lock_link *link;
        struct cl_io_lock_link *temp;
        struct cl_lock         *lock;
        int result;

        ENTRY;
        result = 0;
        list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
                if (!cl_lockset_match(set, &link->cill_descr)) {
                        /* XXX some locking to guarantee that locks aren't
                         * expanded in between. */
                        result = cl_lockset_lock_one(env, io, set, link);
                        if (result != 0)
                                break;
                } else
                        cl_lock_link_fini(env, io, link);
        }
        if (result == 0) {
                list_for_each_entry_safe(link, temp,
                                         &set->cls_curr, cill_linkage) {
                        lock = link->cill_lock;
                        result = cl_wait(env, lock);
                        if (result == 0)
                                list_move(&link->cill_linkage,
                                          &set->cls_done);
                        else
                                break;
                }
        }
        RETURN(result);
}

/**
 * Takes locks necessary for the current iteration of io.
 *
 * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
 * by layers for the current iteration. Then sort locks (to avoid dead-locks),
 * and acquire them.
 */
int cl_io_lock(const struct lu_env *env, struct cl_io *io)
{
        const struct cl_io_slice *scan;
        int result = 0;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_IT_STARTED);
        LINVRNT(cl_io_invariant(io));

        ENTRY;
        cl_io_for_each(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
                        continue;
                result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
                if (result != 0)
                        break;
        }
        if (result == 0) {
                cl_io_locks_sort(io);
                result = cl_lockset_lock(env, io, &io->ci_lockset);
        }
        if (result != 0)
                cl_io_unlock(env, io);
        else
                io->ci_state = CIS_LOCKED;
        RETURN(result);
}
EXPORT_SYMBOL(cl_io_lock);

/**
 * Release locks taken by io.
 */
void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
{
        struct cl_lockset        *set;
        struct cl_io_lock_link   *link;
        struct cl_io_lock_link   *temp;
        const struct cl_io_slice *scan;

        LASSERT(cl_io_is_loopable(io));
        LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
        LINVRNT(cl_io_invariant(io));

        ENTRY;
        set = &io->ci_lockset;

        list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
                cl_lock_link_fini(env, io, link);

        list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
                cl_lock_link_fini(env, io, link);

        list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
                cl_unuse(env, link->cill_lock);
                cl_lock_link_fini(env, io, link);
        }
        cl_io_for_each_reverse(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
                        scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
        }
        io->ci_state = CIS_UNLOCKED;
        LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
        EXIT;
}
EXPORT_SYMBOL(cl_io_unlock);

/**
 * Prepares next iteration of io.
 *
 * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
 * layers a chance to modify io parameters, e.g., so that lov can restrict io
 * to a single stripe.
 */
int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
{
        const struct cl_io_slice *scan;
        int result;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
        LINVRNT(cl_io_invariant(io));

        ENTRY;
        result = 0;
        cl_io_for_each(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
                        continue;
                result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
                                                                      scan);
                if (result != 0)
                        break;
        }
        if (result == 0)
                io->ci_state = CIS_IT_STARTED;
        RETURN(result);
}
EXPORT_SYMBOL(cl_io_iter_init);

/**
 * Finalizes io iteration.
 *
 * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
 */
void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
{
        const struct cl_io_slice *scan;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_UNLOCKED);
        LINVRNT(cl_io_invariant(io));

        ENTRY;
        cl_io_for_each_reverse(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
                        scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
        }
        io->ci_state = CIS_IT_ENDED;
        EXIT;
}
EXPORT_SYMBOL(cl_io_iter_fini);

/**
 * Records that read or write io progressed \a nob bytes forward.
 */
void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
{
        const struct cl_io_slice *scan;

        LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
                nob == 0);
        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(cl_io_invariant(io));

        ENTRY;

        io->u.ci_rw.crw_pos   += nob;
        io->u.ci_rw.crw_count -= nob;

        /* layers have to be notified. */
        cl_io_for_each_reverse(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
                        scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
                                                                   nob);
        }
        EXIT;
}
EXPORT_SYMBOL(cl_io_rw_advance);

/**
 * Adds a lock to a lockset.
 */
int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
                   struct cl_io_lock_link *link)
{
        int result;

        ENTRY;
        if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
                result = +1;
        else {
                list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
                result = 0;
        }
        RETURN(result);
}
EXPORT_SYMBOL(cl_io_lock_add);

static void cl_free_io_lock_link(const struct lu_env *env,
                                 struct cl_io_lock_link *link)
{
        OBD_FREE_PTR(link);
}

/**
 * Allocates new lock link, and uses it to add a lock to a lockset.
 */
int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
                         struct cl_lock_descr *descr)
{
        struct cl_io_lock_link *link;
        int result;

        ENTRY;
        OBD_ALLOC_PTR(link);
        if (link != NULL) {
                link->cill_descr = *descr;
                link->cill_fini  = cl_free_io_lock_link;
                result = cl_io_lock_add(env, io, link);
                if (result) /* lock match */
                        link->cill_fini(env, link);
        } else
                result = -ENOMEM;

        RETURN(result);
}
EXPORT_SYMBOL(cl_io_lock_alloc_add);

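/*
 * Example: a layer's ->cio_lock() method would typically queue the extent
 * it needs before cl_io_lock() acquires the whole set (sketch; the
 * descriptor values are assumptions standing in for a real layer's needs):
 *
 *	struct cl_lock_descr descr = {
 *		.cld_obj   = obj,
 *		.cld_mode  = CLM_WRITE,
 *		.cld_start = start_index,
 *		.cld_end   = end_index
 *	};
 *
 *	result = cl_io_lock_alloc_add(env, io, &descr);
 *
 * A positive result means an equal or wider lock was already queued and
 * the descriptor was merged into it instead of being added.
 */
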
/**
 * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
 */
int cl_io_start(const struct lu_env *env, struct cl_io *io)
{
        const struct cl_io_slice *scan;
        int result = 0;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_LOCKED);
        LINVRNT(cl_io_invariant(io));
        ENTRY;

        io->ci_state = CIS_IO_GOING;
        cl_io_for_each(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
                        continue;
                result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
                if (result != 0)
                        break;
        }
        if (result >= 0)
                result = 0;
        RETURN(result);
}
EXPORT_SYMBOL(cl_io_start);

/**
 * Wait until current io iteration is finished by calling
 * cl_io_operations::cio_end() bottom-to-top.
 */
void cl_io_end(const struct lu_env *env, struct cl_io *io)
{
        const struct cl_io_slice *scan;

        LINVRNT(cl_io_is_loopable(io));
        LINVRNT(io->ci_state == CIS_IO_GOING);
        LINVRNT(cl_io_invariant(io));
        ENTRY;

        cl_io_for_each_reverse(scan, io) {
                if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
                        scan->cis_iop->op[io->ci_type].cio_end(env, scan);
                /* TODO: error handling. */
        }
        io->ci_state = CIS_IO_FINISHED;
        EXIT;
}
EXPORT_SYMBOL(cl_io_end);

static const struct cl_page_slice *
cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
{
        const struct cl_page_slice *slice;

        slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
        LINVRNT(slice != NULL);
        return slice;
}

/**
 * True iff \a page is within \a io range.
 */
static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io)
{
        int     result = 1;
        loff_t  start;
        loff_t  end;
        pgoff_t idx;

        idx = page->cp_index;
        switch (io->ci_type) {
        case CIT_READ:
        case CIT_WRITE:
                /*
                 * check that [start, end) and [pos, pos + count) extents
                 * overlap.
                 */
                if (!cl_io_is_append(io)) {
                        const struct cl_io_rw_common *crw = &(io->u.ci_rw);
                        start = cl_offset(page->cp_obj, idx);
                        end   = cl_offset(page->cp_obj, idx + 1);
                        result = crw->crw_pos < end &&
                                 start < crw->crw_pos + crw->crw_count;
                }
                break;
        case CIT_FAULT:
                result = io->u.ci_fault.ft_index == idx;
                break;
        default:
                LBUG();
        }
        return result;
}

/**
 * Called by read io, when page has to be read from the server.
 *
 * \see cl_io_operations::cio_read_page()
 */
int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
                    struct cl_page *page)
{
        const struct cl_io_slice *scan;
        struct cl_2queue         *queue;
        int result = 0;

        LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
        LINVRNT(cl_page_is_owned(page, io));
        LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
        LINVRNT(cl_page_in_io(page, io));
        LINVRNT(cl_io_invariant(io));
        ENTRY;

        queue = &io->ci_queue;

        cl_2queue_init(queue);
        /*
         * ->cio_read_page() methods called in the loop below are supposed to
         * never block waiting for network (the only subtle point is the
         * creation of new pages for read-ahead that might result in cache
         * shrinking, but currently only clean pages are shrunk and this
         * requires no network io).
         *
         * Should this ever start blocking, a retry loop would be needed for
         * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
         */
        cl_io_for_each(scan, io) {
                if (scan->cis_iop->cio_read_page != NULL) {
                        const struct cl_page_slice *slice;

                        slice = cl_io_slice_page(scan, page);
                        LINVRNT(slice != NULL);
                        result = scan->cis_iop->cio_read_page(env, scan, slice);
                        if (result != 0)
                                break;
                }
        }
        if (result == 0)
                result = cl_io_submit_rw(env, io, CRT_READ, queue);
        /*
         * Unlock unsent pages in case of error.
         */
        cl_page_list_disown(env, io, &queue->c2_qin);
        cl_2queue_fini(env, queue);
        RETURN(result);
}
EXPORT_SYMBOL(cl_io_read_page);

/**
 * Called by write io to prepare page to receive data from user buffer.
 *
 * \see cl_io_operations::cio_prepare_write()
 */
int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
                        struct cl_page *page, unsigned from, unsigned to)
{
        const struct cl_io_slice *scan;
        int result = 0;

        LINVRNT(io->ci_type == CIT_WRITE);
        LINVRNT(cl_page_is_owned(page, io));
        LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
        LINVRNT(cl_io_invariant(io));
        LASSERT(cl_page_in_io(page, io));
        ENTRY;

        cl_io_for_each_reverse(scan, io) {
                if (scan->cis_iop->cio_prepare_write != NULL) {
                        const struct cl_page_slice *slice;

                        slice = cl_io_slice_page(scan, page);
                        result = scan->cis_iop->cio_prepare_write(env, scan,
                                                                  slice,
                                                                  from, to);
                        if (result != 0)
                                break;
                }
        }
        RETURN(result);
}
EXPORT_SYMBOL(cl_io_prepare_write);

/**
 * Called by write io after user data were copied into a page.
 *
 * \see cl_io_operations::cio_commit_write()
 */
int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
                       struct cl_page *page, unsigned from, unsigned to)
{
        const struct cl_io_slice *scan;
        int result = 0;

        LINVRNT(io->ci_type == CIT_WRITE);
        LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
        LINVRNT(cl_io_invariant(io));
        /*
         * XXX Uh... not nice. Top level cl_io_commit_write() call (vvp->lov)
         * already called cl_page_cache_add(), moving page into CPS_CACHED
         * state. Better (and more general) way of dealing with such situation
         * is needed.
         */
        LASSERT(cl_page_is_owned(page, io) || page->cp_parent != NULL);
        LASSERT(cl_page_in_io(page, io));
        ENTRY;

        cl_io_for_each(scan, io) {
                if (scan->cis_iop->cio_commit_write != NULL) {
                        const struct cl_page_slice *slice;

                        slice = cl_io_slice_page(scan, page);
                        result = scan->cis_iop->cio_commit_write(env, scan,
                                                                 slice,
                                                                 from, to);
                        if (result != 0)
                                break;
                }
        }
        LINVRNT(result <= 0);
        RETURN(result);
}
EXPORT_SYMBOL(cl_io_commit_write);

/**
 * Submits a list of pages for immediate io.
 *
 * On return, pages that were submitted have been moved to queue->c2_qout,
 * while queue->c2_qin holds both the pages that did not need to be
 * submitted and the pages that failed to submit.
 *
 * \returns 0 if at least one page was submitted, error code otherwise.
 * \see cl_io_operations::cio_submit()
 */
int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
                    enum cl_req_type crt, struct cl_2queue *queue)
{
        const struct cl_io_slice *scan;
        int result = 0;

        LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op));
        ENTRY;

        cl_io_for_each(scan, io) {
                if (scan->cis_iop->req_op[crt].cio_submit == NULL)
                        continue;
                result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt,
                                                               queue);
                if (result != 0)
                        break;
        }
        /*
         * If ->cio_submit() failed, no pages were sent.
         */
        LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
        RETURN(result);
}
EXPORT_SYMBOL(cl_io_submit_rw);

/**
 * Submit a sync_io and wait until the IO completes or an error occurs.
 * If \a timeout is zero, wait for the IO unconditionally.
 */
int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
                      enum cl_req_type iot, struct cl_2queue *queue,
                      long timeout)
{
        struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
        struct cl_page    *pg;
        int rc;

        cl_page_list_for_each(pg, &queue->c2_qin) {
                LASSERT(pg->cp_sync_io == NULL);
                pg->cp_sync_io = anchor;
        }

        cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
        rc = cl_io_submit_rw(env, io, iot, queue);
        if (rc == 0) {
                /*
                 * If some pages weren't sent for any reason (e.g.,
                 * read found up-to-date pages in the cache, or write found
                 * clean pages), count them as completed to avoid infinite
                 * wait.
                 */
                cl_page_list_for_each(pg, &queue->c2_qin) {
                        pg->cp_sync_io = NULL;
                        cl_sync_io_note(anchor, +1);
                }

                /* wait for the IO to be finished. */
                rc = cl_sync_io_wait(env, io, &queue->c2_qout,
                                     anchor, timeout);
        } else {
                LASSERT(list_empty(&queue->c2_qout.pl_pages));
                cl_page_list_for_each(pg, &queue->c2_qin)
                        pg->cp_sync_io = NULL;
        }
        return rc;
}
EXPORT_SYMBOL(cl_io_submit_sync);

/**
 * Cancel an IO which has been submitted by cl_io_submit_rw.
 */
int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
                 struct cl_page_list *queue)
{
        struct cl_page *page;
        int result = 0;

        CERROR("Canceling ongoing page transmission\n");
        cl_page_list_for_each(page, queue) {
                int rc;

                LINVRNT(cl_page_in_io(page, io));
                rc = cl_page_cancel(env, page);
                result = result ?: rc;
        }
        return result;
}
EXPORT_SYMBOL(cl_io_cancel);

/**
 * Main io loop.
 *
 * Pumps io through iterations calling
 *
 *    - cl_io_iter_init()
 *
 *    - cl_io_lock()
 *
 *    - cl_io_start()
 *
 *    - cl_io_end()
 *
 *    - cl_io_unlock()
 *
 *    - cl_io_iter_fini()
 *
 * repeatedly until there is no more io to do.
 */
int cl_io_loop(const struct lu_env *env, struct cl_io *io)
{
        int result = 0;

        LINVRNT(cl_io_is_loopable(io));
        ENTRY;

        do {
                size_t nob;

                io->ci_continue = 0;
                result = cl_io_iter_init(env, io);
                if (result == 0) {
                        nob    = io->ci_nob;
                        result = cl_io_lock(env, io);
                        if (result == 0) {
                                /*
                                 * Notify layers that locks have been taken,
                                 * and do actual i/o.
                                 *
                                 *   - llite: kms, short read;
                                 *   - llite: generic_file_read();
                                 */
                                result = cl_io_start(env, io);
                                /*
                                 * Send any remaining pending
                                 * io, etc.
                                 *
                                 *   - llite: ll_rw_stats_tally.
                                 */
                                cl_io_end(env, io);
                                cl_io_unlock(env, io);
                                cl_io_rw_advance(env, io, io->ci_nob - nob);
                        }
                }
                cl_io_iter_fini(env, io);
        } while (result == 0 && io->ci_continue);
        if (result == 0)
                result = io->ci_result;
        RETURN(result < 0 ? result : 0);
}
EXPORT_SYMBOL(cl_io_loop);

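/*
 * Example of the iteration contract (sketch; "foo_io_iter_init" is a
 * hypothetical layer method, not part of this file): a layer that can only
 * make partial progress per iteration, the way lov restricts io to a
 * single stripe, clips the io range in its ->cio_iter_init() and sets
 * io->ci_continue so that cl_io_loop() comes back for the remainder:
 *
 *	static int foo_io_iter_init(const struct lu_env *env,
 *				    const struct cl_io_slice *ios)
 *	{
 *		struct cl_io *io = ios->cis_io;
 *
 *		// clip io->u.ci_rw to the sub-range this pass will cover
 *		io->ci_continue = 1;	// more work remains after this pass
 *		return 0;
 *	}
 */
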
/**
 * Adds io slice to the cl_io.
 *
 * This is called by cl_object_operations::coo_io_init() methods to add a
 * per-layer state to the io. New state is added at the end of
 * cl_io::ci_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
 */
void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
                     struct cl_object *obj,
                     const struct cl_io_operations *ops)
{
        struct list_head *linkage = &slice->cis_linkage;

        LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
                list_empty(linkage));
        ENTRY;

        list_add_tail(linkage, &io->ci_layers);
        slice->cis_io  = io;
        slice->cis_obj = obj;
        slice->cis_iop = ops;
        EXIT;
}
EXPORT_SYMBOL(cl_io_slice_add);

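/*
 * Example: a layer object's ->coo_io_init() method allocates (or finds in
 * the session) its per-layer io state and stacks it with cl_io_slice_add()
 * (sketch; "foo_io", "foo_env_io" and "foo_io_ops" are hypothetical names
 * for a layer's private state, its env accessor and its cl_io_operations):
 *
 *	static int foo_io_init(const struct lu_env *env,
 *			       struct cl_object *obj, struct cl_io *io)
 *	{
 *		struct foo_io *fio = foo_env_io(env);
 *
 *		cl_io_slice_add(io, &fio->fi_cl, obj, &foo_io_ops);
 *		return 0;
 *	}
 */
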
/**
 * Initializes page list.
 */
void cl_page_list_init(struct cl_page_list *plist)
{
        ENTRY;
        plist->pl_nr = 0;
        INIT_LIST_HEAD(&plist->pl_pages);
        plist->pl_owner = current;
        EXIT;
}
EXPORT_SYMBOL(cl_page_list_init);

/**
 * Adds a page to a page list.
 */
void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
{
        ENTRY;
        /* it would be better to check that page is owned by "current" io, but
         * it is not passed here. */
        LASSERT(page->cp_owner != NULL);
        LINVRNT(plist->pl_owner == current);

        lockdep_off();
        mutex_lock(&page->cp_mutex);
        lockdep_on();
        LASSERT(list_empty(&page->cp_batch));
        list_add_tail(&page->cp_batch, &plist->pl_pages);
        ++plist->pl_nr;
        page->cp_queue_ref = lu_ref_add(&page->cp_reference, "queue", plist);
        cl_page_get(page);
        EXIT;
}
EXPORT_SYMBOL(cl_page_list_add);

/**
 * Removes a page from a page list.
 */
void cl_page_list_del(const struct lu_env *env,
                      struct cl_page_list *plist, struct cl_page *page)
{
        LASSERT(plist->pl_nr > 0);
        LINVRNT(plist->pl_owner == current);

        ENTRY;
        list_del_init(&page->cp_batch);
        lockdep_off();
        mutex_unlock(&page->cp_mutex);
        lockdep_on();
        --plist->pl_nr;
        lu_ref_del_at(&page->cp_reference, page->cp_queue_ref, "queue", plist);
        cl_page_put(env, page);
        EXIT;
}
EXPORT_SYMBOL(cl_page_list_del);

/**
 * Moves a page from one page list to another.
 */
void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
                       struct cl_page *page)
{
        LASSERT(src->pl_nr > 0);
        LINVRNT(dst->pl_owner == current);
        LINVRNT(src->pl_owner == current);

        ENTRY;
        list_move_tail(&page->cp_batch, &dst->pl_pages);
        --src->pl_nr;
        ++dst->pl_nr;
        lu_ref_set_at(&page->cp_reference,
                      page->cp_queue_ref, "queue", src, dst);
        EXIT;
}
EXPORT_SYMBOL(cl_page_list_move);

/**
 * Splices one page list onto another, just as list_splice() does for list
 * heads.
 */
void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
{
        struct cl_page *page;
        struct cl_page *tmp;

        LINVRNT(list->pl_owner == current);
        LINVRNT(head->pl_owner == current);

        ENTRY;
        cl_page_list_for_each_safe(page, tmp, list)
                cl_page_list_move(head, list, page);
        EXIT;
}
EXPORT_SYMBOL(cl_page_list_splice);

void cl_page_disown0(const struct lu_env *env,
                     struct cl_io *io, struct cl_page *pg);

/**
 * Disowns pages in a queue.
 */
void cl_page_list_disown(const struct lu_env *env,
                         struct cl_io *io, struct cl_page_list *plist)
{
        struct cl_page *page;
        struct cl_page *temp;

        LINVRNT(plist->pl_owner == current);

        ENTRY;
        cl_page_list_for_each_safe(page, temp, plist) {
                LASSERT(plist->pl_nr > 0);

                list_del_init(&page->cp_batch);
                lockdep_off();
                mutex_unlock(&page->cp_mutex);
                lockdep_on();
                --plist->pl_nr;
                /*
                 * cl_page_disown0 rather than usual cl_page_disown() is used,
                 * because pages are possibly in CPS_FREEING state already due
                 * to the call to cl_page_list_discard().
                 */
                /*
                 * XXX cl_page_disown0() will fail if page is not locked.
                 */
                cl_page_disown0(env, io, page);
                lu_ref_del(&page->cp_reference, "queue", plist);
                cl_page_put(env, page);
        }
        EXIT;
}
EXPORT_SYMBOL(cl_page_list_disown);

/**
 * Releases pages from queue.
 */
void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
{
        struct cl_page *page;
        struct cl_page *temp;

        LINVRNT(plist->pl_owner == current);

        ENTRY;
        cl_page_list_for_each_safe(page, temp, plist)
                cl_page_list_del(env, plist, page);
        LASSERT(plist->pl_nr == 0);
        EXIT;
}
EXPORT_SYMBOL(cl_page_list_fini);

/**
 * Owns all pages in a queue.
 */
int cl_page_list_own(const struct lu_env *env,
                     struct cl_io *io, struct cl_page_list *plist)
{
        struct cl_page *page;
        struct cl_page *temp;
        pgoff_t index = 0;
        int result;

        LINVRNT(plist->pl_owner == current);

        ENTRY;
        result = 0;
        cl_page_list_for_each_safe(page, temp, plist) {
                LASSERT(index <= page->cp_index);
                index = page->cp_index;
                if (cl_page_own(env, io, page) == 0)
                        result = result ?: page->cp_error;
                else
                        cl_page_list_del(env, plist, page);
        }
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_list_own);

/**
 * Assumes all pages in a queue.
 */
void cl_page_list_assume(const struct lu_env *env,
                         struct cl_io *io, struct cl_page_list *plist)
{
        struct cl_page *page;

        LINVRNT(plist->pl_owner == current);

        cl_page_list_for_each(page, plist)
                cl_page_assume(env, io, page);
}
EXPORT_SYMBOL(cl_page_list_assume);

/**
 * Discards all pages in a queue.
 */
void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
                          struct cl_page_list *plist)
{
        struct cl_page *page;

        LINVRNT(plist->pl_owner == current);
        ENTRY;
        cl_page_list_for_each(page, plist)
                cl_page_discard(env, io, page);
        EXIT;
}
EXPORT_SYMBOL(cl_page_list_discard);

/**
 * Unmaps all pages in a queue from user virtual memory.
 */
int cl_page_list_unmap(const struct lu_env *env, struct cl_io *io,
                       struct cl_page_list *plist)
{
        struct cl_page *page;
        int result;

        LINVRNT(plist->pl_owner == current);
        ENTRY;
        result = 0;
        cl_page_list_for_each(page, plist) {
                result = cl_page_unmap(env, io, page);
                if (result != 0)
                        break;
        }
        RETURN(result);
}
EXPORT_SYMBOL(cl_page_list_unmap);

/**
 * Initialize dual page queue.
 */
void cl_2queue_init(struct cl_2queue *queue)
{
        ENTRY;
        cl_page_list_init(&queue->c2_qin);
        cl_page_list_init(&queue->c2_qout);
        EXIT;
}
EXPORT_SYMBOL(cl_2queue_init);

/**
 * Add a page to the incoming page list of 2-queue.
 */
void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page)
{
        ENTRY;
        cl_page_list_add(&queue->c2_qin, page);
        EXIT;
}
EXPORT_SYMBOL(cl_2queue_add);

/**
 * Disown pages in both lists of a 2-queue.
 */
void cl_2queue_disown(const struct lu_env *env,
                      struct cl_io *io, struct cl_2queue *queue)
{
        ENTRY;
        cl_page_list_disown(env, io, &queue->c2_qin);
        cl_page_list_disown(env, io, &queue->c2_qout);
        EXIT;
}
EXPORT_SYMBOL(cl_2queue_disown);

/**
 * Discard (truncate) pages in both lists of a 2-queue.
 */
void cl_2queue_discard(const struct lu_env *env,
                       struct cl_io *io, struct cl_2queue *queue)
{
        ENTRY;
        cl_page_list_discard(env, io, &queue->c2_qin);
        cl_page_list_discard(env, io, &queue->c2_qout);
        EXIT;
}
EXPORT_SYMBOL(cl_2queue_discard);

/**
 * Assume ownership of the pages in both lists of a 2-queue.
 */
void cl_2queue_assume(const struct lu_env *env,
                      struct cl_io *io, struct cl_2queue *queue)
{
        cl_page_list_assume(env, io, &queue->c2_qin);
        cl_page_list_assume(env, io, &queue->c2_qout);
}
EXPORT_SYMBOL(cl_2queue_assume);

/**
 * Finalize both page lists of a 2-queue.
 */
void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
{
        ENTRY;
        cl_page_list_fini(env, &queue->c2_qout);
        cl_page_list_fini(env, &queue->c2_qin);
        EXIT;
}
EXPORT_SYMBOL(cl_2queue_fini);

/**
 * Initialize a 2-queue to contain \a page in its incoming page list.
 */
void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
{
        ENTRY;
        cl_2queue_init(queue);
        cl_2queue_add(queue, page);
        EXIT;
}
EXPORT_SYMBOL(cl_2queue_init_page);

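/*
 * Example: synchronous transfer of one already-owned page through a
 * 2-queue (sketch; assumes "page" is owned by "io", and uses the zero
 * timeout of cl_io_submit_sync() to wait unconditionally):
 *
 *	struct cl_2queue *queue = &io->ci_queue;
 *
 *	cl_2queue_init_page(queue, page);
 *	result = cl_io_submit_sync(env, io, CRT_WRITE, queue, 0);
 *	cl_2queue_disown(env, io, queue);
 *	cl_2queue_fini(env, queue);
 */
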
/**
 * Returns top-level io.
 *
 * \see cl_object_top(), cl_page_top().
 */
struct cl_io *cl_io_top(struct cl_io *io)
{
        while (io->ci_parent != NULL)
                io = io->ci_parent;
        return io;
}
EXPORT_SYMBOL(cl_io_top);

/**
 * Prints human readable representation of \a io to the given printer.
 */
void cl_io_print(const struct lu_env *env, void *cookie,
                 lu_printer_t printer, const struct cl_io *io)
{
}

/**
 * Adds request slice to the compound request.
 *
 * This is called by cl_device_operations::cdo_req_init() methods to add a
 * per-layer state to the request. New state is added at the end of
 * cl_req::crq_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_page_slice_add(), cl_io_slice_add()
 */
void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
                      struct cl_device *dev,
                      const struct cl_req_operations *ops)
{
        ENTRY;
        list_add_tail(&slice->crs_linkage, &req->crq_layers);
        slice->crs_dev = dev;
        slice->crs_ops = ops;
        slice->crs_req = req;
        EXIT;
}
EXPORT_SYMBOL(cl_req_slice_add);

static void cl_req_free(const struct lu_env *env, struct cl_req *req)
{
        LASSERT(list_empty(&req->crq_pages));
        LASSERT(req->crq_nrpages == 0);
        LINVRNT(list_empty(&req->crq_layers));
        LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
        ENTRY;

        if (req->crq_o != NULL) {
                int i;

                for (i = 0; i < req->crq_nrobjs; ++i) {
                        struct cl_object *obj = req->crq_o[i].ro_obj;

                        if (obj != NULL) {
                                lu_object_ref_del_at(&obj->co_lu,
                                                     req->crq_o[i].ro_obj_ref,
                                                     "cl_req", req);
                                cl_object_put(env, obj);
                        }
                }
                OBD_FREE(req->crq_o, req->crq_nrobjs * sizeof req->crq_o[0]);
        }
        OBD_FREE_PTR(req);
        EXIT;
}

static int cl_req_init(const struct lu_env *env, struct cl_req *req,
                       struct cl_page *page)
{
        struct cl_device     *dev;
        struct cl_page_slice *slice;
        int result;

        ENTRY;
        result = 0;
        page = cl_page_top(page);
        do {
                list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
                        dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
                        if (dev->cd_ops->cdo_req_init != NULL) {
                                result = dev->cd_ops->cdo_req_init(env,
                                                                   dev, req);
                                if (result != 0)
                                        break;
                        }
                }
                page = page->cp_child;
        } while (page != NULL && result == 0);
        RETURN(result);
}

/**
 * Invokes per-request transfer completion call-backs
 * (cl_req_operations::cro_completion()) bottom-to-top.
 */
void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
{
        struct cl_req_slice *slice;

        ENTRY;
        /*
         * for the lack of list_for_each_entry_reverse_safe()...
         */
        while (!list_empty(&req->crq_layers)) {
                slice = list_entry(req->crq_layers.prev,
                                   struct cl_req_slice, crs_linkage);
                list_del_init(&slice->crs_linkage);
                if (slice->crs_ops->cro_completion != NULL)
                        slice->crs_ops->cro_completion(env, slice, rc);
        }
        cl_req_free(env, req);
        EXIT;
}
EXPORT_SYMBOL(cl_req_completion);

/**
 * Allocates new transfer request.
 */
struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
                            enum cl_req_type crt, int nr_objects)
{
        struct cl_req *req;

        LINVRNT(nr_objects > 0);
        ENTRY;

        OBD_ALLOC_PTR(req);
        if (req != NULL) {
                int result;

                OBD_ALLOC(req->crq_o, nr_objects * sizeof req->crq_o[0]);
                if (req->crq_o != NULL) {
                        req->crq_nrobjs = nr_objects;
                        req->crq_type = crt;
                        INIT_LIST_HEAD(&req->crq_pages);
                        INIT_LIST_HEAD(&req->crq_layers);
                        result = cl_req_init(env, req, page);
                } else
                        result = -ENOMEM;
                if (result != 0) {
                        cl_req_completion(env, req, result);
                        req = ERR_PTR(result);
                }
        } else
                req = ERR_PTR(-ENOMEM);
        RETURN(req);
}
EXPORT_SYMBOL(cl_req_alloc);

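/*
 * Example: the expected life-cycle of a transfer request built from these
 * cl_req_*() primitives (sketch; error handling and the actual network
 * send are elided, and the page iteration is schematic):
 *
 *	req = cl_req_alloc(env, page, CRT_WRITE, nr_objects);
 *	// for every page of the transfer:
 *	cl_req_page_add(env, req, page);
 *	rc = cl_req_prep(env, req);
 *	// ... send the request; when the transfer finishes:
 *	cl_req_completion(env, req, rc);
 */
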
/**
 * Adds a page to a request.
 */
void cl_req_page_add(const struct lu_env *env,
                     struct cl_req *req, struct cl_page *page)
{
        struct cl_object  *obj;
        struct cl_req_obj *rqo;
        int i;

        ENTRY;
        page = cl_page_top(page);

        LASSERT(list_empty(&page->cp_flight));
        LASSERT(page->cp_req == NULL);

        CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
                      req, req->crq_type, req->crq_nrpages);

        list_add_tail(&page->cp_flight, &req->crq_pages);
        ++req->crq_nrpages;
        page->cp_req = req;
        obj = cl_object_top(page->cp_obj);
        for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
                if (rqo->ro_obj == NULL) {
                        rqo->ro_obj = obj;
                        cl_object_get(obj);
                        rqo->ro_obj_ref = lu_object_ref_add(&obj->co_lu,
                                                            "cl_req", req);
                        break;
                }
        }
        LASSERT(i < req->crq_nrobjs);
        EXIT;
}
EXPORT_SYMBOL(cl_req_page_add);

/**
 * Removes a page from a request.
 */
void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
{
        struct cl_req *req = page->cp_req;

        ENTRY;
        page = cl_page_top(page);

        LASSERT(!list_empty(&page->cp_flight));
        LASSERT(req->crq_nrpages > 0);

        list_del_init(&page->cp_flight);
        --req->crq_nrpages;
        page->cp_req = NULL;
        EXIT;
}
EXPORT_SYMBOL(cl_req_page_done);

/**
 * Notifies layers that request is about to depart by calling
 * cl_req_operations::cro_prep() top-to-bottom.
 */
int cl_req_prep(const struct lu_env *env, struct cl_req *req)
{
        int i;
        int result;
        const struct cl_req_slice *slice;

        ENTRY;
        /*
         * Check that the caller of cl_req_alloc() didn't lie about the number
         * of objects.
         */
        for (i = 0; i < req->crq_nrobjs; ++i)
                LASSERT(req->crq_o[i].ro_obj != NULL);

        result = 0;
        list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
                if (slice->crs_ops->cro_prep != NULL) {
                        result = slice->crs_ops->cro_prep(env, slice);
                        if (result != 0)
                                break;
                }
        }
        RETURN(result);
}
EXPORT_SYMBOL(cl_req_prep);

/**
 * Fills in attributes that are passed to server together with transfer. Only
 * attributes from \a flags may be touched. This can be called multiple times
 * for the same request.
 */
void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
                     struct cl_req_attr *attr, obd_valid flags)
{
        const struct cl_req_slice *slice;
        struct cl_page            *page;
        int i;

        LASSERT(!list_empty(&req->crq_pages));
        ENTRY;

        /* Take any page to use as a model. */
        page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);

        for (i = 0; i < req->crq_nrobjs; ++i) {
                list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
                        const struct cl_page_slice *scan;
                        const struct cl_object     *obj;

                        scan = cl_page_at(page,
                                          slice->crs_dev->cd_lu_dev.ld_type);
                        LASSERT(scan != NULL);
                        obj = scan->cpl_obj;
                        if (slice->crs_ops->cro_attr_set != NULL)
                                slice->crs_ops->cro_attr_set(env, slice, obj,
                                                             attr + i, flags);
                }
        }
        EXIT;
}
EXPORT_SYMBOL(cl_req_attr_set);

/* XXX complete(), init_completion(), and wait_for_completion(), until they are
 * implemented in libcfs. */
# include <linux/sched.h>

/**
 * Initialize synchronous io wait anchor, for transfer of \a nrpages pages.
 */
void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
{
        ENTRY;
        init_waitqueue_head(&anchor->csi_waitq);
        atomic_set(&anchor->csi_sync_nr, nrpages);
        atomic_set(&anchor->csi_barrier, nrpages > 0);
        anchor->csi_sync_rc = 0;
        EXIT;
}
EXPORT_SYMBOL(cl_sync_io_init);

/**
 * Wait until all transfers complete. The transfer completion routine has to
 * call cl_sync_io_note() for every page.
 */
int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
                    struct cl_page_list *queue, struct cl_sync_io *anchor,
                    long timeout)
{
        struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
                                                  NULL, NULL, NULL);
        int rc;
        ENTRY;

        LASSERT(timeout >= 0);

        rc = l_wait_event(anchor->csi_waitq,
                          atomic_read(&anchor->csi_sync_nr) == 0,
                          &lwi);
        if (rc < 0) {
                CERROR("SYNC IO failed with error: %d, try to cancel "
                       "%d remaining pages\n",
                       rc, atomic_read(&anchor->csi_sync_nr));

                (void)cl_io_cancel(env, io, queue);

                lwi = (struct l_wait_info) { 0 };
                (void)l_wait_event(anchor->csi_waitq,
                                   atomic_read(&anchor->csi_sync_nr) == 0,
                                   &lwi);
        } else {
                rc = anchor->csi_sync_rc;
        }
        LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
        cl_page_list_assume(env, io, queue);

        /* wait until cl_sync_io_note() has done wakeup */
        while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
                cpu_relax();
        }

        POISON(anchor, 0x5a, sizeof *anchor);
        RETURN(rc);
}
EXPORT_SYMBOL(cl_sync_io_wait);

/**
 * Indicate that transfer of a single page completed.
 */
void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
{
        ENTRY;
        if (anchor->csi_sync_rc == 0 && ioret < 0)
                anchor->csi_sync_rc = ioret;
        /*
         * Synchronous IO done without releasing page lock (e.g., as a part of
         * ->{prepare,commit}_write()). Completion is used to signal the end of
         * IO.
         */
        LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
        if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
                wake_up_all(&anchor->csi_waitq);
                /* it's safe to nuke or reuse anchor now */
                atomic_set(&anchor->csi_barrier, 0);
        }
        EXIT;
}
EXPORT_SYMBOL(cl_sync_io_note);

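/*
 * Example: how the anchor primitives fit together (sketch; in this file
 * the completion side is wired up through cl_page::cp_sync_io, see
 * cl_io_submit_sync()):
 *
 *	cl_sync_io_init(anchor, nrpages);
 *	// submit nrpages pages; every completion does:
 *	cl_sync_io_note(anchor, ioret);
 *	// while the submitter blocks in:
 *	rc = cl_sync_io_wait(env, io, queue, anchor, timeout);
 */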