drivers/staging/lustre/lustre/obdclass/cl_io.c
1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26 /*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2012, Intel Corporation.
31 */
32 /*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * Client IO.
37 *
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
39 */
40
41 #define DEBUG_SUBSYSTEM S_CLASS
42
43 #include <obd_class.h>
44 #include <obd_support.h>
45 #include <lustre_fid.h>
46 #include <linux/list.h>
47 #include <cl_object.h>
48 #include "cl_internal.h"
49
50 /*****************************************************************************
51 *
52 * cl_io interface.
53 *
54 */
55
56 #define cl_io_for_each(slice, io) \
57 list_for_each_entry((slice), &io->ci_layers, cis_linkage)
58 #define cl_io_for_each_reverse(slice, io) \
59 list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
60
61 static inline int cl_io_type_is_valid(enum cl_io_type type)
62 {
63 return CIT_READ <= type && type < CIT_OP_NR;
64 }
65
66 static inline int cl_io_is_loopable(const struct cl_io *io)
67 {
68 return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
69 }
70
71 /**
72 * Returns true iff there is an IO ongoing in the given environment.
73 */
74 int cl_io_is_going(const struct lu_env *env)
75 {
76 return cl_env_info(env)->clt_current_io != NULL;
77 }
78 EXPORT_SYMBOL(cl_io_is_going);
79
80 /**
81 * cl_io invariant that holds at all times when exported cl_io_*() functions
82 * are entered and left.
83 */
84 static int cl_io_invariant(const struct cl_io *io)
85 {
86 struct cl_io *up;
87
88 up = io->ci_parent;
89 return
90 /*
91 * io can own pages only when it is ongoing. Sub-io might
92 * still be in CIS_LOCKED state when top-io is in
93 * CIS_IO_GOING.
94 */
95 ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
96 (io->ci_state == CIS_LOCKED && up != NULL));
97 }
98
99 /**
100 * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
101 */
102 void cl_io_fini(const struct lu_env *env, struct cl_io *io)
103 {
104 struct cl_io_slice *slice;
105 struct cl_thread_info *info;
106
107 LINVRNT(cl_io_type_is_valid(io->ci_type));
108 LINVRNT(cl_io_invariant(io));
109 ENTRY;
110
111 while (!list_empty(&io->ci_layers)) {
112 slice = container_of(io->ci_layers.prev, struct cl_io_slice,
113 cis_linkage);
114 list_del_init(&slice->cis_linkage);
115 if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
116 slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
117 /*
118 * Invalidate slice to catch use after free. This assumes that
119 * slices are allocated within session and can be touched
120 * after ->cio_fini() returns.
121 */
122 slice->cis_io = NULL;
123 }
124 io->ci_state = CIS_FINI;
125 info = cl_env_info(env);
126 if (info->clt_current_io == io)
127 info->clt_current_io = NULL;
128
129 /* sanity check for layout change */
130 switch (io->ci_type) {
131 case CIT_READ:
132 case CIT_WRITE:
133 break;
134 case CIT_FAULT:
135 case CIT_FSYNC:
136 LASSERT(!io->ci_need_restart);
137 break;
138 case CIT_SETATTR:
139 case CIT_MISC:
140 /* Check ignore layout change conf */
141 LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
142 !io->ci_need_restart));
143 break;
144 default:
145 LBUG();
146 }
147 EXIT;
148 }
149 EXPORT_SYMBOL(cl_io_fini);
150
151 static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
152 enum cl_io_type iot, struct cl_object *obj)
153 {
154 struct cl_object *scan;
155 int result;
156
157 LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
158 LINVRNT(cl_io_type_is_valid(iot));
159 LINVRNT(cl_io_invariant(io));
160 ENTRY;
161
162 io->ci_type = iot;
163 INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
164 INIT_LIST_HEAD(&io->ci_lockset.cls_curr);
165 INIT_LIST_HEAD(&io->ci_lockset.cls_done);
166 INIT_LIST_HEAD(&io->ci_layers);
167
168 result = 0;
169 cl_object_for_each(scan, obj) {
170 if (scan->co_ops->coo_io_init != NULL) {
171 result = scan->co_ops->coo_io_init(env, scan, io);
172 if (result != 0)
173 break;
174 }
175 }
176 if (result == 0)
177 io->ci_state = CIS_INIT;
178 RETURN(result);
179 }
180
181 /**
182 * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
183 *
184 * \pre obj != cl_object_top(obj)
185 */
186 int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
187 enum cl_io_type iot, struct cl_object *obj)
188 {
189 struct cl_thread_info *info = cl_env_info(env);
190
191 LASSERT(obj != cl_object_top(obj));
192 if (info->clt_current_io == NULL)
193 info->clt_current_io = io;
194 return cl_io_init0(env, io, iot, obj);
195 }
196 EXPORT_SYMBOL(cl_io_sub_init);
197
198 /**
199 * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
200 *
201 * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
202 * what the latter returned.
203 *
204 * \pre obj == cl_object_top(obj)
205 * \pre cl_io_type_is_valid(iot)
206 * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
207 */
208 int cl_io_init(const struct lu_env *env, struct cl_io *io,
209 enum cl_io_type iot, struct cl_object *obj)
210 {
211 struct cl_thread_info *info = cl_env_info(env);
212
213 LASSERT(obj == cl_object_top(obj));
214 LASSERT(info->clt_current_io == NULL);
215
216 info->clt_current_io = io;
217 return cl_io_init0(env, io, iot, obj);
218 }
219 EXPORT_SYMBOL(cl_io_init);
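/*
 * Editor's note (illustrative sketch, not part of the original file):
 * a typical top-level caller pairs cl_io_init() with an unconditional
 * cl_io_fini(), as the comment above requires, and drives the io with
 * cl_io_loop() (defined later in this file).  "env", "io", "iot" and
 * "obj" are assumed to be prepared by the caller.
 *
 *	rc = cl_io_init(env, io, iot, obj);
 *	if (rc == 0)
 *		rc = cl_io_loop(env, io);
 *	cl_io_fini(env, io);
 *
 * Note that cl_io_fini() runs no matter what cl_io_init() returned.
 */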
220
221 /**
222 * Initialize read or write io.
223 *
224 * \pre iot == CIT_READ || iot == CIT_WRITE
225 */
226 int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
227 enum cl_io_type iot, loff_t pos, size_t count)
228 {
229 LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
230 LINVRNT(io->ci_obj != NULL);
231 ENTRY;
232
233 LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
234 "io range: %u ["LPU64", "LPU64") %u %u\n",
235 iot, (__u64)pos, (__u64)pos + count,
236 io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
237 io->u.ci_rw.crw_pos = pos;
238 io->u.ci_rw.crw_count = count;
239 RETURN(cl_io_init(env, io, iot, io->ci_obj));
240 }
241 EXPORT_SYMBOL(cl_io_rw_init);
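/*
 * Editor's note (illustrative sketch, not part of the original file):
 * cl_io_rw_init() requires io->ci_obj to be set (see the LINVRNT above)
 * and fills io->u.ci_rw.crw_pos/crw_count itself, so a read of
 * [pos, pos + count) reduces to:
 *
 *	io->ci_obj = obj;
 *	rc = cl_io_rw_init(env, io, CIT_READ, pos, count);
 *	if (rc == 0)
 *		rc = cl_io_loop(env, io);
 *	cl_io_fini(env, io);
 *
 * cl_io_rw_advance() (below) then moves crw_pos/crw_count forward as
 * the loop makes progress.
 */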
242
243 static inline const struct lu_fid *
244 cl_lock_descr_fid(const struct cl_lock_descr *descr)
245 {
246 return lu_object_fid(&descr->cld_obj->co_lu);
247 }
248
249 static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
250 const struct cl_lock_descr *d1)
251 {
252 return lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1)) ?:
253 __diff_normalize(d0->cld_start, d1->cld_start);
254 }
255
256 static int cl_lock_descr_cmp(const struct cl_lock_descr *d0,
257 const struct cl_lock_descr *d1)
258 {
259 int ret;
260
261 ret = lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1));
262 if (ret)
263 return ret;
264 if (d0->cld_end < d1->cld_start)
265 return -1;
266 if (d0->cld_start > d1->cld_end)
267 return 1;
268 return 0;
269 }
270
271 static void cl_lock_descr_merge(struct cl_lock_descr *d0,
272 const struct cl_lock_descr *d1)
273 {
274 d0->cld_start = min(d0->cld_start, d1->cld_start);
275 d0->cld_end = max(d0->cld_end, d1->cld_end);
276
277 if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
278 d0->cld_mode = CLM_WRITE;
279
280 if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
281 d0->cld_mode = CLM_GROUP;
282 }
283
284 /*
285 * Sort locks in lexicographical order of their (fid, start-offset) pairs.
286 */
287 static void cl_io_locks_sort(struct cl_io *io)
288 {
289 int done = 0;
290
291 ENTRY;
292 /* hidden treasure: bubble sort for now. */
293 do {
294 struct cl_io_lock_link *curr;
295 struct cl_io_lock_link *prev;
296 struct cl_io_lock_link *temp;
297
298 done = 1;
299 prev = NULL;
300
301 list_for_each_entry_safe(curr, temp,
302 &io->ci_lockset.cls_todo,
303 cill_linkage) {
304 if (prev != NULL) {
305 switch (cl_lock_descr_sort(&prev->cill_descr,
306 &curr->cill_descr)) {
307 case 0:
308 /*
309 * IMPOSSIBLE: Identical locks are
310 * already removed at
311 * this point.
312 */
313 default:
314 LBUG();
315 case +1:
316 list_move_tail(&curr->cill_linkage,
317 &prev->cill_linkage);
318 done = 0;
319 continue; /* don't change prev: it's
320 * still "previous" */
321 case -1: /* already in order */
322 break;
323 }
324 }
325 prev = curr;
326 }
327 } while (!done);
328 EXIT;
329 }
330
331 /**
332 * Check whether \a queue contains locks matching \a need.
333 *
334 * \retval +ve there is a matching lock in the \a queue
335 * \retval 0 there are no matching locks in the \a queue
336 */
337 int cl_queue_match(const struct list_head *queue,
338 const struct cl_lock_descr *need)
339 {
340 struct cl_io_lock_link *scan;
341
342 ENTRY;
343 list_for_each_entry(scan, queue, cill_linkage) {
344 if (cl_lock_descr_match(&scan->cill_descr, need))
345 RETURN(+1);
346 }
347 RETURN(0);
348 }
349 EXPORT_SYMBOL(cl_queue_match);
350
351 static int cl_queue_merge(const struct list_head *queue,
352 const struct cl_lock_descr *need)
353 {
354 struct cl_io_lock_link *scan;
355
356 ENTRY;
357 list_for_each_entry(scan, queue, cill_linkage) {
358 if (cl_lock_descr_cmp(&scan->cill_descr, need))
359 continue;
360 cl_lock_descr_merge(&scan->cill_descr, need);
361 CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
362 scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
363 scan->cill_descr.cld_end);
364 RETURN(+1);
365 }
366 RETURN(0);
367
368 }
369
370 static int cl_lockset_match(const struct cl_lockset *set,
371 const struct cl_lock_descr *need)
372 {
373 return cl_queue_match(&set->cls_curr, need) ||
374 cl_queue_match(&set->cls_done, need);
375 }
376
377 static int cl_lockset_merge(const struct cl_lockset *set,
378 const struct cl_lock_descr *need)
379 {
380 return cl_queue_merge(&set->cls_todo, need) ||
381 cl_lockset_match(set, need);
382 }
383
384 static int cl_lockset_lock_one(const struct lu_env *env,
385 struct cl_io *io, struct cl_lockset *set,
386 struct cl_io_lock_link *link)
387 {
388 struct cl_lock *lock;
389 int result;
390
391 ENTRY;
392
393 lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
394
395 if (!IS_ERR(lock)) {
396 link->cill_lock = lock;
397 list_move(&link->cill_linkage, &set->cls_curr);
398 if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
399 result = cl_wait(env, lock);
400 if (result == 0)
401 list_move(&link->cill_linkage,
402 &set->cls_done);
403 } else
404 result = 0;
405 } else
406 result = PTR_ERR(lock);
407 RETURN(result);
408 }
409
410 static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
411 struct cl_io_lock_link *link)
412 {
413 struct cl_lock *lock = link->cill_lock;
414
415 ENTRY;
416 list_del_init(&link->cill_linkage);
417 if (lock != NULL) {
418 cl_lock_release(env, lock, "io", io);
419 link->cill_lock = NULL;
420 }
421 if (link->cill_fini != NULL)
422 link->cill_fini(env, link);
423 EXIT;
424 }
425
426 static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
427 struct cl_lockset *set)
428 {
429 struct cl_io_lock_link *link;
430 struct cl_io_lock_link *temp;
431 struct cl_lock *lock;
432 int result;
433
434 ENTRY;
435 result = 0;
436 list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
437 if (!cl_lockset_match(set, &link->cill_descr)) {
438 /* XXX some locking to guarantee that locks aren't
439 * expanded in between. */
440 result = cl_lockset_lock_one(env, io, set, link);
441 if (result != 0)
442 break;
443 } else
444 cl_lock_link_fini(env, io, link);
445 }
446 if (result == 0) {
447 list_for_each_entry_safe(link, temp,
448 &set->cls_curr, cill_linkage) {
449 lock = link->cill_lock;
450 result = cl_wait(env, lock);
451 if (result == 0)
452 list_move(&link->cill_linkage,
453 &set->cls_done);
454 else
455 break;
456 }
457 }
458 RETURN(result);
459 }
460
461 /**
462 * Takes locks necessary for the current iteration of io.
463 *
464 * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
465 * by layers for the current iteration. Then sorts the locks (to avoid
466 * deadlocks) and acquires them.
467 */
468 int cl_io_lock(const struct lu_env *env, struct cl_io *io)
469 {
470 const struct cl_io_slice *scan;
471 int result = 0;
472
473 LINVRNT(cl_io_is_loopable(io));
474 LINVRNT(io->ci_state == CIS_IT_STARTED);
475 LINVRNT(cl_io_invariant(io));
476
477 ENTRY;
478 cl_io_for_each(scan, io) {
479 if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
480 continue;
481 result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
482 if (result != 0)
483 break;
484 }
485 if (result == 0) {
486 cl_io_locks_sort(io);
487 result = cl_lockset_lock(env, io, &io->ci_lockset);
488 }
489 if (result != 0)
490 cl_io_unlock(env, io);
491 else
492 io->ci_state = CIS_LOCKED;
493 RETURN(result);
494 }
495 EXPORT_SYMBOL(cl_io_lock);
496
497 /**
498 * Releases locks taken by io.
499 */
500 void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
501 {
502 struct cl_lockset *set;
503 struct cl_io_lock_link *link;
504 struct cl_io_lock_link *temp;
505 const struct cl_io_slice *scan;
506
507 LASSERT(cl_io_is_loopable(io));
508 LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
509 LINVRNT(cl_io_invariant(io));
510
511 ENTRY;
512 set = &io->ci_lockset;
513
514 list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
515 cl_lock_link_fini(env, io, link);
516
517 list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
518 cl_lock_link_fini(env, io, link);
519
520 list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
521 cl_unuse(env, link->cill_lock);
522 cl_lock_link_fini(env, io, link);
523 }
524 cl_io_for_each_reverse(scan, io) {
525 if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
526 scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
527 }
528 io->ci_state = CIS_UNLOCKED;
529 LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
530 EXIT;
531 }
532 EXPORT_SYMBOL(cl_io_unlock);
533
534 /**
535 * Prepares next iteration of io.
536 *
537 * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
538 * layers a chance to modify io parameters, e.g., so that lov can restrict io
539 * to a single stripe.
540 */
541 int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
542 {
543 const struct cl_io_slice *scan;
544 int result;
545
546 LINVRNT(cl_io_is_loopable(io));
547 LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
548 LINVRNT(cl_io_invariant(io));
549
550 ENTRY;
551 result = 0;
552 cl_io_for_each(scan, io) {
553 if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
554 continue;
555 result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
556 scan);
557 if (result != 0)
558 break;
559 }
560 if (result == 0)
561 io->ci_state = CIS_IT_STARTED;
562 RETURN(result);
563 }
564 EXPORT_SYMBOL(cl_io_iter_init);
565
566 /**
567 * Finalizes io iteration.
568 *
569 * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
570 */
571 void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
572 {
573 const struct cl_io_slice *scan;
574
575 LINVRNT(cl_io_is_loopable(io));
576 LINVRNT(io->ci_state == CIS_UNLOCKED);
577 LINVRNT(cl_io_invariant(io));
578
579 ENTRY;
580 cl_io_for_each_reverse(scan, io) {
581 if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
582 scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
583 }
584 io->ci_state = CIS_IT_ENDED;
585 EXIT;
586 }
587 EXPORT_SYMBOL(cl_io_iter_fini);
588
589 /**
590 * Records that read or write io progressed \a nob bytes forward.
591 */
592 void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
593 {
594 const struct cl_io_slice *scan;
595
596 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
597 nob == 0);
598 LINVRNT(cl_io_is_loopable(io));
599 LINVRNT(cl_io_invariant(io));
600
601 ENTRY;
602
603 io->u.ci_rw.crw_pos += nob;
604 io->u.ci_rw.crw_count -= nob;
605
606 /* layers have to be notified. */
607 cl_io_for_each_reverse(scan, io) {
608 if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
609 scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
610 nob);
611 }
612 EXIT;
613 }
614 EXPORT_SYMBOL(cl_io_rw_advance);
615
616 /**
617 * Adds a lock to a lockset.
618 */
619 int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
620 struct cl_io_lock_link *link)
621 {
622 int result;
623
624 ENTRY;
625 if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
626 result = +1;
627 else {
628 list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
629 result = 0;
630 }
631 RETURN(result);
632 }
633 EXPORT_SYMBOL(cl_io_lock_add);
634
635 static void cl_free_io_lock_link(const struct lu_env *env,
636 struct cl_io_lock_link *link)
637 {
638 OBD_FREE_PTR(link);
639 }
640
641 /**
642 * Allocates new lock link, and uses it to add a lock to a lockset.
643 */
644 int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
645 struct cl_lock_descr *descr)
646 {
647 struct cl_io_lock_link *link;
648 int result;
649
650 ENTRY;
651 OBD_ALLOC_PTR(link);
652 if (link != NULL) {
653 link->cill_descr = *descr;
654 link->cill_fini = cl_free_io_lock_link;
655 result = cl_io_lock_add(env, io, link);
656 if (result) /* lock match */
657 link->cill_fini(env, link);
658 } else
659 result = -ENOMEM;
660
661 RETURN(result);
662 }
663 EXPORT_SYMBOL(cl_io_lock_alloc_add);
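/*
 * Editor's note (illustrative sketch, not part of the original file):
 * a layer's ->cio_lock() method typically describes the extent it needs
 * and queues it through cl_io_lock_alloc_add().  Only descriptor fields
 * already used elsewhere in this file are shown; "obj", "start" and
 * "end" are assumed to come from the caller.
 *
 *	struct cl_lock_descr descr;
 *
 *	memset(&descr, 0, sizeof(descr));
 *	descr.cld_obj   = obj;
 *	descr.cld_start = start;
 *	descr.cld_end   = end;
 *	descr.cld_mode  = CLM_WRITE;
 *	rc = cl_io_lock_alloc_add(env, io, &descr);
 *	return rc < 0 ? rc : 0;
 *
 * A +1 return means the extent was merged into a lock already present
 * in the lockset; 0 means a new link was queued on cls_todo.
 */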
664
665 /**
666 * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
667 */
668 int cl_io_start(const struct lu_env *env, struct cl_io *io)
669 {
670 const struct cl_io_slice *scan;
671 int result = 0;
672
673 LINVRNT(cl_io_is_loopable(io));
674 LINVRNT(io->ci_state == CIS_LOCKED);
675 LINVRNT(cl_io_invariant(io));
676 ENTRY;
677
678 io->ci_state = CIS_IO_GOING;
679 cl_io_for_each(scan, io) {
680 if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
681 continue;
682 result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
683 if (result != 0)
684 break;
685 }
686 if (result >= 0)
687 result = 0;
688 RETURN(result);
689 }
690 EXPORT_SYMBOL(cl_io_start);
691
692 /**
693 * Wait until current io iteration is finished by calling
694 * cl_io_operations::cio_end() bottom-to-top.
695 */
696 void cl_io_end(const struct lu_env *env, struct cl_io *io)
697 {
698 const struct cl_io_slice *scan;
699
700 LINVRNT(cl_io_is_loopable(io));
701 LINVRNT(io->ci_state == CIS_IO_GOING);
702 LINVRNT(cl_io_invariant(io));
703 ENTRY;
704
705 cl_io_for_each_reverse(scan, io) {
706 if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
707 scan->cis_iop->op[io->ci_type].cio_end(env, scan);
708 /* TODO: error handling. */
709 }
710 io->ci_state = CIS_IO_FINISHED;
711 EXIT;
712 }
713 EXPORT_SYMBOL(cl_io_end);
714
715 static const struct cl_page_slice *
716 cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
717 {
718 const struct cl_page_slice *slice;
719
720 slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
721 LINVRNT(slice != NULL);
722 return slice;
723 }
724
725 /**
726 * True iff \a page is within \a io range.
727 */
728 static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io)
729 {
730 int result = 1;
731 loff_t start;
732 loff_t end;
733 pgoff_t idx;
734
735 idx = page->cp_index;
736 switch (io->ci_type) {
737 case CIT_READ:
738 case CIT_WRITE:
739 /*
740 * check that [start, end) and [pos, pos + count) extents
741 * overlap.
742 */
743 if (!cl_io_is_append(io)) {
744 const struct cl_io_rw_common *crw = &(io->u.ci_rw);
745 start = cl_offset(page->cp_obj, idx);
746 end = cl_offset(page->cp_obj, idx + 1);
747 result = crw->crw_pos < end &&
748 start < crw->crw_pos + crw->crw_count;
749 }
750 break;
751 case CIT_FAULT:
752 result = io->u.ci_fault.ft_index == idx;
753 break;
754 default:
755 LBUG();
756 }
757 return result;
758 }
759
760 /**
761 * Called by read io when a page has to be read from the server.
762 *
763 * \see cl_io_operations::cio_read_page()
764 */
765 int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
766 struct cl_page *page)
767 {
768 const struct cl_io_slice *scan;
769 struct cl_2queue *queue;
770 int result = 0;
771
772 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
773 LINVRNT(cl_page_is_owned(page, io));
774 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
775 LINVRNT(cl_page_in_io(page, io));
776 LINVRNT(cl_io_invariant(io));
777 ENTRY;
778
779 queue = &io->ci_queue;
780
781 cl_2queue_init(queue);
782 /*
783 * ->cio_read_page() methods called in the loop below are supposed to
784 * never block waiting for network (the only subtle point is the
785 * creation of new pages for read-ahead that might result in cache
786 * shrinking, but currently only clean pages are shrunk and this
787 * requires no network io).
788 *
789 * Should this ever start blocking, a retry loop would be needed for
790 * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
791 */
792 cl_io_for_each(scan, io) {
793 if (scan->cis_iop->cio_read_page != NULL) {
794 const struct cl_page_slice *slice;
795
796 slice = cl_io_slice_page(scan, page);
797 LINVRNT(slice != NULL);
798 result = scan->cis_iop->cio_read_page(env, scan, slice);
799 if (result != 0)
800 break;
801 }
802 }
803 if (result == 0)
804 result = cl_io_submit_rw(env, io, CRT_READ, queue);
805 /*
806 * Unlock unsent pages in case of error.
807 */
808 cl_page_list_disown(env, io, &queue->c2_qin);
809 cl_2queue_fini(env, queue);
810 RETURN(result);
811 }
812 EXPORT_SYMBOL(cl_io_read_page);
813
814 /**
815 * Called by write io to prepare a page to receive data from the user buffer.
816 *
817 * \see cl_io_operations::cio_prepare_write()
818 */
819 int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
820 struct cl_page *page, unsigned from, unsigned to)
821 {
822 const struct cl_io_slice *scan;
823 int result = 0;
824
825 LINVRNT(io->ci_type == CIT_WRITE);
826 LINVRNT(cl_page_is_owned(page, io));
827 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
828 LINVRNT(cl_io_invariant(io));
829 LASSERT(cl_page_in_io(page, io));
830 ENTRY;
831
832 cl_io_for_each_reverse(scan, io) {
833 if (scan->cis_iop->cio_prepare_write != NULL) {
834 const struct cl_page_slice *slice;
835
836 slice = cl_io_slice_page(scan, page);
837 result = scan->cis_iop->cio_prepare_write(env, scan,
838 slice,
839 from, to);
840 if (result != 0)
841 break;
842 }
843 }
844 RETURN(result);
845 }
846 EXPORT_SYMBOL(cl_io_prepare_write);
847
848 /**
849 * Called by write io after user data has been copied into a page.
850 *
851 * \see cl_io_operations::cio_commit_write()
852 */
853 int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
854 struct cl_page *page, unsigned from, unsigned to)
855 {
856 const struct cl_io_slice *scan;
857 int result = 0;
858
859 LINVRNT(io->ci_type == CIT_WRITE);
860 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
861 LINVRNT(cl_io_invariant(io));
862 /*
863 * XXX Uh... not nice. Top level cl_io_commit_write() call (vvp->lov)
864 * already called cl_page_cache_add(), moving page into CPS_CACHED
865 * state. Better (and more general) way of dealing with such situation
866 * is needed.
867 */
868 LASSERT(cl_page_is_owned(page, io) || page->cp_parent != NULL);
869 LASSERT(cl_page_in_io(page, io));
870 ENTRY;
871
872 cl_io_for_each(scan, io) {
873 if (scan->cis_iop->cio_commit_write != NULL) {
874 const struct cl_page_slice *slice;
875
876 slice = cl_io_slice_page(scan, page);
877 result = scan->cis_iop->cio_commit_write(env, scan,
878 slice,
879 from, to);
880 if (result != 0)
881 break;
882 }
883 }
884 LINVRNT(result <= 0);
885 RETURN(result);
886 }
887 EXPORT_SYMBOL(cl_io_commit_write);
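/*
 * Editor's note (illustrative sketch, not part of the original file):
 * the write path described above is a prepare/copy/commit sequence.
 * With "page", "from" and "to" supplied by the caller, the shape is:
 *
 *	rc = cl_io_prepare_write(env, io, page, from, to);
 *	if (rc == 0) {
 *		... copy user data into the page ...
 *		rc = cl_io_commit_write(env, io, page, from, to);
 *	}
 *
 * The XXX comment above explains why the page may already have moved
 * to CPS_CACHED by the time cl_io_commit_write() is entered.
 */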
888
889 /**
890 * Submits a list of pages for immediate io.
891 *
892 * When this function returns, the submitted pages have been moved to the
893 * queue->c2_qout queue, while queue->c2_qin contains both the pages that
894 * did not need to be submitted and the pages that failed to submit.
895 *
896 * \returns 0 if at least one page was submitted, error code otherwise.
897 * \see cl_io_operations::cio_submit()
898 */
899 int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
900 enum cl_req_type crt, struct cl_2queue *queue)
901 {
902 const struct cl_io_slice *scan;
903 int result = 0;
904
905 LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op));
906 ENTRY;
907
908 cl_io_for_each(scan, io) {
909 if (scan->cis_iop->req_op[crt].cio_submit == NULL)
910 continue;
911 result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt,
912 queue);
913 if (result != 0)
914 break;
915 }
916 /*
917 * If ->cio_submit() failed, no pages were sent.
918 */
919 LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
920 RETURN(result);
921 }
922 EXPORT_SYMBOL(cl_io_submit_rw);
923
924 /**
925 * Submits a sync io and waits until the IO completes or an error occurs.
926 * If \a timeout is zero, wait for the IO unconditionally.
927 */
928 int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
929 enum cl_req_type iot, struct cl_2queue *queue,
930 long timeout)
931 {
932 struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
933 struct cl_page *pg;
934 int rc;
935
936 cl_page_list_for_each(pg, &queue->c2_qin) {
937 LASSERT(pg->cp_sync_io == NULL);
938 pg->cp_sync_io = anchor;
939 }
940
941 cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
942 rc = cl_io_submit_rw(env, io, iot, queue);
943 if (rc == 0) {
944 /*
945 * If some pages weren't sent for any reason (e.g.,
946 * read found up-to-date pages in the cache, or write found
947 * clean pages), count them as completed to avoid infinite
948 * wait.
949 */
950 cl_page_list_for_each(pg, &queue->c2_qin) {
951 pg->cp_sync_io = NULL;
952 cl_sync_io_note(anchor, +1);
953 }
954
955 /* wait for the IO to be finished. */
956 rc = cl_sync_io_wait(env, io, &queue->c2_qout,
957 anchor, timeout);
958 } else {
959 LASSERT(list_empty(&queue->c2_qout.pl_pages));
960 cl_page_list_for_each(pg, &queue->c2_qin)
961 pg->cp_sync_io = NULL;
962 }
963 return rc;
964 }
965 EXPORT_SYMBOL(cl_io_submit_sync);
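/*
 * Editor's note (illustrative sketch, not part of the original file):
 * submitting a single owned page synchronously combines the 2-queue
 * helpers defined later in this file with cl_io_submit_sync().  "page"
 * is assumed to be owned by "io"; a timeout of 0 waits unconditionally.
 *
 *	struct cl_2queue *queue = &io->ci_queue;
 *
 *	cl_2queue_init_page(queue, page);
 *	rc = cl_io_submit_sync(env, io, CRT_READ, queue, 0);
 *	cl_2queue_disown(env, io, queue);
 *	cl_2queue_fini(env, queue);
 */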
966
967 /**
968 * Cancel an IO which has been submitted by cl_io_submit_rw.
969 */
970 int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
971 struct cl_page_list *queue)
972 {
973 struct cl_page *page;
974 int result = 0;
975
976 CERROR("Canceling ongoing page trasmission\n");
977 cl_page_list_for_each(page, queue) {
978 int rc;
979
980 LINVRNT(cl_page_in_io(page, io));
981 rc = cl_page_cancel(env, page);
982 result = result ?: rc;
983 }
984 return result;
985 }
986 EXPORT_SYMBOL(cl_io_cancel);
987
988 /**
989 * Main io loop.
990 *
991 * Pumps io through iterations calling
992 *
993 * - cl_io_iter_init()
994 *
995 * - cl_io_lock()
996 *
997 * - cl_io_start()
998 *
999 * - cl_io_end()
1000 *
1001 * - cl_io_unlock()
1002 *
1003 * - cl_io_iter_fini()
1004 *
1005 * repeatedly until there is no more io to do.
1006 */
1007 int cl_io_loop(const struct lu_env *env, struct cl_io *io)
1008 {
1009 int result = 0;
1010
1011 LINVRNT(cl_io_is_loopable(io));
1012 ENTRY;
1013
1014 do {
1015 size_t nob;
1016
1017 io->ci_continue = 0;
1018 result = cl_io_iter_init(env, io);
1019 if (result == 0) {
1020 nob = io->ci_nob;
1021 result = cl_io_lock(env, io);
1022 if (result == 0) {
1023 /*
1024 * Notify layers that locks have been taken,
1025 * and do actual i/o.
1026 *
1027 * - llite: kms, short read;
1028 * - llite: generic_file_read();
1029 */
1030 result = cl_io_start(env, io);
1031 /*
1032 * Send any remaining pending
1033 * io, etc.
1034 *
1035 * - llite: ll_rw_stats_tally.
1036 */
1037 cl_io_end(env, io);
1038 cl_io_unlock(env, io);
1039 cl_io_rw_advance(env, io, io->ci_nob - nob);
1040 }
1041 }
1042 cl_io_iter_fini(env, io);
1043 } while (result == 0 && io->ci_continue);
1044 if (result == 0)
1045 result = io->ci_result;
1046 RETURN(result < 0 ? result : 0);
1047 }
1048 EXPORT_SYMBOL(cl_io_loop);
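/*
 * Editor's note (illustrative sketch, not part of the original file):
 * for a single iteration, the loop body above expands to the call
 * sequence below; this is the ordering that layers can rely on:
 *
 *	rc = cl_io_iter_init(env, io);
 *	if (rc == 0) {
 *		rc = cl_io_lock(env, io);
 *		if (rc == 0) {
 *			rc = cl_io_start(env, io);
 *			cl_io_end(env, io);
 *			cl_io_unlock(env, io);
 *		}
 *	}
 *	cl_io_iter_fini(env, io);
 */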
1049
1050 /**
1051 * Adds io slice to the cl_io.
1052 *
1053 * This is called by cl_object_operations::coo_io_init() methods to add a
1054 * per-layer state to the io. New state is added at the end of
1055 * cl_io::ci_layers list, that is, it is at the bottom of the stack.
1056 *
1057 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
1058 */
1059 void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
1060 struct cl_object *obj,
1061 const struct cl_io_operations *ops)
1062 {
1063 struct list_head *linkage = &slice->cis_linkage;
1064
1065 LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
1066 list_empty(linkage));
1067 ENTRY;
1068
1069 list_add_tail(linkage, &io->ci_layers);
1070 slice->cis_io = io;
1071 slice->cis_obj = obj;
1072 slice->cis_iop = ops;
1073 EXIT;
1074 }
1075 EXPORT_SYMBOL(cl_io_slice_add);
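/*
 * Editor's note (illustrative sketch, not part of the original file):
 * a layer's ->coo_io_init() method usually embeds a cl_io_slice in its
 * own per-io state and registers it here.  "foo_io", "foo_env_io()",
 * "fi_cl" and "foo_io_ops" are hypothetical names standing in for a
 * real layer (e.g. vvp or lov):
 *
 *	static int foo_io_init(const struct lu_env *env,
 *			       struct cl_object *obj, struct cl_io *io)
 *	{
 *		struct foo_io *fio = foo_env_io(env);
 *
 *		cl_io_slice_add(io, &fio->fi_cl, obj, &foo_io_ops);
 *		return 0;
 *	}
 */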
1076
1077
1078 /**
1079 * Initializes page list.
1080 */
1081 void cl_page_list_init(struct cl_page_list *plist)
1082 {
1083 ENTRY;
1084 plist->pl_nr = 0;
1085 INIT_LIST_HEAD(&plist->pl_pages);
1086 plist->pl_owner = current;
1087 EXIT;
1088 }
1089 EXPORT_SYMBOL(cl_page_list_init);
1090
1091 /**
1092 * Adds a page to a page list.
1093 */
1094 void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
1095 {
1096 ENTRY;
1097 /* it would be better to check that page is owned by "current" io, but
1098 * it is not passed here. */
1099 LASSERT(page->cp_owner != NULL);
1100 LINVRNT(plist->pl_owner == current);
1101
1102 lockdep_off();
1103 mutex_lock(&page->cp_mutex);
1104 lockdep_on();
1105 LASSERT(list_empty(&page->cp_batch));
1106 list_add_tail(&page->cp_batch, &plist->pl_pages);
1107 ++plist->pl_nr;
1108 lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
1109 cl_page_get(page);
1110 EXIT;
1111 }
1112 EXPORT_SYMBOL(cl_page_list_add);
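/*
 * Editor's note (illustrative sketch, not part of the original file):
 * the usual page-list life cycle built from the helpers in this file.
 * Pages must already be owned by "io" when they are added:
 *
 *	struct cl_page_list plist;
 *
 *	cl_page_list_init(&plist);
 *	cl_page_list_add(&plist, page);
 *	... submit or otherwise process the list ...
 *	cl_page_list_disown(env, io, &plist);
 *	cl_page_list_fini(env, &plist);
 */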
1113
1114 /**
1115 * Removes a page from a page list.
1116 */
1117 void cl_page_list_del(const struct lu_env *env,
1118 struct cl_page_list *plist, struct cl_page *page)
1119 {
1120 LASSERT(plist->pl_nr > 0);
1121 LINVRNT(plist->pl_owner == current);
1122
1123 ENTRY;
1124 list_del_init(&page->cp_batch);
1125 lockdep_off();
1126 mutex_unlock(&page->cp_mutex);
1127 lockdep_on();
1128 --plist->pl_nr;
1129 lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
1130 cl_page_put(env, page);
1131 EXIT;
1132 }
1133 EXPORT_SYMBOL(cl_page_list_del);
1134
1135 /**
1136 * Moves a page from one page list to another.
1137 */
1138 void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
1139 struct cl_page *page)
1140 {
1141 LASSERT(src->pl_nr > 0);
1142 LINVRNT(dst->pl_owner == current);
1143 LINVRNT(src->pl_owner == current);
1144
1145 ENTRY;
1146 list_move_tail(&page->cp_batch, &dst->pl_pages);
1147 --src->pl_nr;
1148 ++dst->pl_nr;
1149 lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
1150 src, dst);
1151 EXIT;
1152 }
1153 EXPORT_SYMBOL(cl_page_list_move);
1154
1155 /**
1156 * Splices the pages of \a list onto \a head, just as list_splice() does
1157 * for a list_head.
1157 */
1158 void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
1159 {
1160 struct cl_page *page;
1161 struct cl_page *tmp;
1162
1163 LINVRNT(list->pl_owner == current);
1164 LINVRNT(head->pl_owner == current);
1165
1166 ENTRY;
1167 cl_page_list_for_each_safe(page, tmp, list)
1168 cl_page_list_move(head, list, page);
1169 EXIT;
1170 }
1171 EXPORT_SYMBOL(cl_page_list_splice);
1172
1173 void cl_page_disown0(const struct lu_env *env,
1174 struct cl_io *io, struct cl_page *pg);
1175
1176 /**
1177 * Disowns pages in a queue.
1178 */
1179 void cl_page_list_disown(const struct lu_env *env,
1180 struct cl_io *io, struct cl_page_list *plist)
1181 {
1182 struct cl_page *page;
1183 struct cl_page *temp;
1184
1185 LINVRNT(plist->pl_owner == current);
1186
1187 ENTRY;
1188 cl_page_list_for_each_safe(page, temp, plist) {
1189 LASSERT(plist->pl_nr > 0);
1190
1191 list_del_init(&page->cp_batch);
1192 lockdep_off();
1193 mutex_unlock(&page->cp_mutex);
1194 lockdep_on();
1195 --plist->pl_nr;
1196 /*
1197 * cl_page_disown0 rather than usual cl_page_disown() is used,
1198 * because pages are possibly in CPS_FREEING state already due
1199 * to the call to cl_page_list_discard().
1200 */
1201 /*
1202 * XXX cl_page_disown0() will fail if page is not locked.
1203 */
1204 cl_page_disown0(env, io, page);
1205 lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
1206 plist);
1207 cl_page_put(env, page);
1208 }
1209 EXIT;
1210 }
1211 EXPORT_SYMBOL(cl_page_list_disown);
1212
1213 /**
1214 * Releases pages from queue.
1215 */
1216 void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
1217 {
1218 struct cl_page *page;
1219 struct cl_page *temp;
1220
1221 LINVRNT(plist->pl_owner == current);
1222
1223 ENTRY;
1224 cl_page_list_for_each_safe(page, temp, plist)
1225 cl_page_list_del(env, plist, page);
1226 LASSERT(plist->pl_nr == 0);
1227 EXIT;
1228 }
1229 EXPORT_SYMBOL(cl_page_list_fini);
1230
1231 /**
1232 * Owns all pages in a queue.
1233 */
1234 int cl_page_list_own(const struct lu_env *env,
1235 struct cl_io *io, struct cl_page_list *plist)
1236 {
1237 struct cl_page *page;
1238 struct cl_page *temp;
1239 pgoff_t index = 0;
1240 int result;
1241
1242 LINVRNT(plist->pl_owner == current);
1243
1244 ENTRY;
1245 result = 0;
1246 cl_page_list_for_each_safe(page, temp, plist) {
1247 LASSERT(index <= page->cp_index);
1248 index = page->cp_index;
1249 if (cl_page_own(env, io, page) == 0)
1250 result = result ?: page->cp_error;
1251 else
1252 cl_page_list_del(env, plist, page);
1253 }
1254 RETURN(result);
1255 }
1256 EXPORT_SYMBOL(cl_page_list_own);
1257
1258 /**
1259 * Assumes all pages in a queue.
1260 */
1261 void cl_page_list_assume(const struct lu_env *env,
1262 struct cl_io *io, struct cl_page_list *plist)
1263 {
1264 struct cl_page *page;
1265
1266 LINVRNT(plist->pl_owner == current);
1267
1268 cl_page_list_for_each(page, plist)
1269 cl_page_assume(env, io, page);
1270 }
1271 EXPORT_SYMBOL(cl_page_list_assume);
1272
1273 /**
1274 * Discards all pages in a queue.
1275 */
1276 void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
1277 struct cl_page_list *plist)
1278 {
1279 struct cl_page *page;
1280
1281 LINVRNT(plist->pl_owner == current);
1282 ENTRY;
1283 cl_page_list_for_each(page, plist)
1284 cl_page_discard(env, io, page);
1285 EXIT;
1286 }
1287 EXPORT_SYMBOL(cl_page_list_discard);
1288
1289 /**
1290 * Unmaps all pages in a queue from user virtual memory.
1291 */
1292 int cl_page_list_unmap(const struct lu_env *env, struct cl_io *io,
1293 struct cl_page_list *plist)
1294 {
1295 struct cl_page *page;
1296 int result;
1297
1298 LINVRNT(plist->pl_owner == current);
1299 ENTRY;
1300 result = 0;
1301 cl_page_list_for_each(page, plist) {
1302 result = cl_page_unmap(env, io, page);
1303 if (result != 0)
1304 break;
1305 }
1306 RETURN(result);
1307 }
1308 EXPORT_SYMBOL(cl_page_list_unmap);
1309
1310 /**
1311 * Initialize dual page queue.
1312 */
1313 void cl_2queue_init(struct cl_2queue *queue)
1314 {
1315 ENTRY;
1316 cl_page_list_init(&queue->c2_qin);
1317 cl_page_list_init(&queue->c2_qout);
1318 EXIT;
1319 }
1320 EXPORT_SYMBOL(cl_2queue_init);
1321
1322 /**
1323 * Add a page to the incoming page list of 2-queue.
1324 */
1325 void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page)
1326 {
1327 ENTRY;
1328 cl_page_list_add(&queue->c2_qin, page);
1329 EXIT;
1330 }
1331 EXPORT_SYMBOL(cl_2queue_add);
1332
1333 /**
1334 * Disown pages in both lists of a 2-queue.
1335 */
1336 void cl_2queue_disown(const struct lu_env *env,
1337 struct cl_io *io, struct cl_2queue *queue)
1338 {
1339 ENTRY;
1340 cl_page_list_disown(env, io, &queue->c2_qin);
1341 cl_page_list_disown(env, io, &queue->c2_qout);
1342 EXIT;
1343 }
1344 EXPORT_SYMBOL(cl_2queue_disown);
1345
1346 /**
1347 * Discard (truncate) pages in both lists of a 2-queue.
1348 */
1349 void cl_2queue_discard(const struct lu_env *env,
1350 struct cl_io *io, struct cl_2queue *queue)
1351 {
1352 ENTRY;
1353 cl_page_list_discard(env, io, &queue->c2_qin);
1354 cl_page_list_discard(env, io, &queue->c2_qout);
1355 EXIT;
1356 }
1357 EXPORT_SYMBOL(cl_2queue_discard);
1358
1359 /**
1360 * Assumes ownership of the pages in both lists of a cl_2queue.
1361 */
1362 void cl_2queue_assume(const struct lu_env *env,
1363 struct cl_io *io, struct cl_2queue *queue)
1364 {
1365 cl_page_list_assume(env, io, &queue->c2_qin);
1366 cl_page_list_assume(env, io, &queue->c2_qout);
1367 }
1368 EXPORT_SYMBOL(cl_2queue_assume);
1369
1370 /**
1371 * Finalize both page lists of a 2-queue.
1372 */
1373 void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
1374 {
1375 ENTRY;
1376 cl_page_list_fini(env, &queue->c2_qout);
1377 cl_page_list_fini(env, &queue->c2_qin);
1378 EXIT;
1379 }
1380 EXPORT_SYMBOL(cl_2queue_fini);
1381
1382 /**
1383 * Initialize a 2-queue to contain \a page in its incoming page list.
1384 */
1385 void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
1386 {
1387 ENTRY;
1388 cl_2queue_init(queue);
1389 cl_2queue_add(queue, page);
1390 EXIT;
1391 }
1392 EXPORT_SYMBOL(cl_2queue_init_page);
1393
1394 /**
1395 * Returns top-level io.
1396 *
1397 * \see cl_object_top(), cl_page_top().
1398 */
1399 struct cl_io *cl_io_top(struct cl_io *io)
1400 {
1401 ENTRY;
1402 while (io->ci_parent != NULL)
1403 io = io->ci_parent;
1404 RETURN(io);
1405 }
1406 EXPORT_SYMBOL(cl_io_top);
1407
1408 /**
1409 * Prints human readable representation of \a io to the \a f.
1410 */
1411 void cl_io_print(const struct lu_env *env, void *cookie,
1412 lu_printer_t printer, const struct cl_io *io)
1413 {
1414 }
1415
1416 /**
1417 * Adds request slice to the compound request.
1418 *
1419 * This is called by cl_device_operations::cdo_req_init() methods to add a
1420 * per-layer state to the request. New state is added at the end of
1421 * cl_req::crq_layers list, that is, it is at the bottom of the stack.
1422 *
1423 * \see cl_lock_slice_add(), cl_page_slice_add(), cl_io_slice_add()
1424 */
1425 void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
1426 struct cl_device *dev,
1427 const struct cl_req_operations *ops)
1428 {
1429 ENTRY;
1430 list_add_tail(&slice->crs_linkage, &req->crq_layers);
1431 slice->crs_dev = dev;
1432 slice->crs_ops = ops;
1433 slice->crs_req = req;
1434 EXIT;
1435 }
1436 EXPORT_SYMBOL(cl_req_slice_add);
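/*
 * Editor's note (illustrative sketch, not part of the original file):
 * this mirrors the cl_io_slice_add() example earlier in the file, but
 * for the request stack.  A device's ->cdo_req_init() method attaches
 * its per-request state here; "foo_req", "fr_cl" and "foo_req_ops" are
 * hypothetical names:
 *
 *	struct foo_req *fr = ...allocate per-request state...;
 *
 *	if (fr == NULL)
 *		return -ENOMEM;
 *	cl_req_slice_add(req, &fr->fr_cl, dev, &foo_req_ops);
 *	return 0;
 */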
1437
1438 static void cl_req_free(const struct lu_env *env, struct cl_req *req)
1439 {
1440 unsigned i;
1441
1442 LASSERT(list_empty(&req->crq_pages));
1443 LASSERT(req->crq_nrpages == 0);
1444 LINVRNT(list_empty(&req->crq_layers));
1445 LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
1446 ENTRY;
1447
1448 if (req->crq_o != NULL) {
1449 for (i = 0; i < req->crq_nrobjs; ++i) {
1450 struct cl_object *obj = req->crq_o[i].ro_obj;
1451 if (obj != NULL) {
1452 lu_object_ref_del_at(&obj->co_lu,
1453 &req->crq_o[i].ro_obj_ref,
1454 "cl_req", req);
1455 cl_object_put(env, obj);
1456 }
1457 }
1458 OBD_FREE(req->crq_o, req->crq_nrobjs * sizeof req->crq_o[0]);
1459 }
1460 OBD_FREE_PTR(req);
1461 EXIT;
1462 }
1463
1464 static int cl_req_init(const struct lu_env *env, struct cl_req *req,
1465 struct cl_page *page)
1466 {
1467 struct cl_device *dev;
1468 struct cl_page_slice *slice;
1469 int result;
1470
1471 ENTRY;
1472 result = 0;
1473 page = cl_page_top(page);
1474 do {
1475 list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
1476 dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
1477 if (dev->cd_ops->cdo_req_init != NULL) {
1478 result = dev->cd_ops->cdo_req_init(env,
1479 dev, req);
1480 if (result != 0)
1481 break;
1482 }
1483 }
1484 page = page->cp_child;
1485 } while (page != NULL && result == 0);
1486 RETURN(result);
1487 }
1488
1489 /**
1490 * Invokes per-request transfer completion call-backs
1491 * (cl_req_operations::cro_completion()) bottom-to-top.
1492 */
1493 void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
1494 {
1495 struct cl_req_slice *slice;
1496
1497 ENTRY;
1498 /*
1499 * for the lack of list_for_each_entry_reverse_safe()...
1500 */
1501 while (!list_empty(&req->crq_layers)) {
1502 slice = list_entry(req->crq_layers.prev,
1503 struct cl_req_slice, crs_linkage);
1504 list_del_init(&slice->crs_linkage);
1505 if (slice->crs_ops->cro_completion != NULL)
1506 slice->crs_ops->cro_completion(env, slice, rc);
1507 }
1508 cl_req_free(env, req);
1509 EXIT;
1510 }
1511 EXPORT_SYMBOL(cl_req_completion);
1512
1513 /**
1514 * Allocates new transfer request.
1515 */
1516 struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
1517 enum cl_req_type crt, int nr_objects)
1518 {
1519 struct cl_req *req;
1520
1521 LINVRNT(nr_objects > 0);
1522 ENTRY;
1523
1524 OBD_ALLOC_PTR(req);
1525 if (req != NULL) {
1526 int result;
1527
1528 OBD_ALLOC(req->crq_o, nr_objects * sizeof req->crq_o[0]);
1529 if (req->crq_o != NULL) {
1530 req->crq_nrobjs = nr_objects;
1531 req->crq_type = crt;
1532 INIT_LIST_HEAD(&req->crq_pages);
1533 INIT_LIST_HEAD(&req->crq_layers);
1534 result = cl_req_init(env, req, page);
1535 } else
1536 result = -ENOMEM;
1537 if (result != 0) {
1538 cl_req_completion(env, req, result);
1539 req = ERR_PTR(result);
1540 }
1541 } else
1542 req = ERR_PTR(-ENOMEM);
1543 RETURN(req);
1544 }
1545 EXPORT_SYMBOL(cl_req_alloc);
1546
1547 /**
1548 * Adds a page to a request.
1549 */
1550 void cl_req_page_add(const struct lu_env *env,
1551 struct cl_req *req, struct cl_page *page)
1552 {
1553 struct cl_object *obj;
1554 struct cl_req_obj *rqo;
1555 int i;
1556
1557 ENTRY;
1558 page = cl_page_top(page);
1559
1560 LASSERT(list_empty(&page->cp_flight));
1561 LASSERT(page->cp_req == NULL);
1562
1563 CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
1564 req, req->crq_type, req->crq_nrpages);
1565
1566 list_add_tail(&page->cp_flight, &req->crq_pages);
1567 ++req->crq_nrpages;
1568 page->cp_req = req;
1569 obj = cl_object_top(page->cp_obj);
1570 for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
1571 if (rqo->ro_obj == NULL) {
1572 rqo->ro_obj = obj;
1573 cl_object_get(obj);
1574 lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
1575 "cl_req", req);
1576 break;
1577 }
1578 }
1579 LASSERT(i < req->crq_nrobjs);
1580 EXIT;
1581 }
1582 EXPORT_SYMBOL(cl_req_page_add);
1583
1584 /**
1585 * Removes a page from a request.
1586 */
1587 void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
1588 {
1589 struct cl_req *req = page->cp_req;
1590
1591 ENTRY;
1592 page = cl_page_top(page);
1593
1594 LASSERT(!list_empty(&page->cp_flight));
1595 LASSERT(req->crq_nrpages > 0);
1596
1597 list_del_init(&page->cp_flight);
1598 --req->crq_nrpages;
1599 page->cp_req = NULL;
1600 EXIT;
1601 }
1602 EXPORT_SYMBOL(cl_req_page_done);
1603
1604 /**
1605 * Notifies layers that request is about to depart by calling
1606 * cl_req_operations::cro_prep() top-to-bottom.
1607 */
1608 int cl_req_prep(const struct lu_env *env, struct cl_req *req)
1609 {
1610 int i;
1611 int result;
1612 const struct cl_req_slice *slice;
1613
1614 ENTRY;
1615 /*
1616 * Check that the caller of cl_req_alloc() didn't lie about the number
1617 * of objects.
1618 */
1619 for (i = 0; i < req->crq_nrobjs; ++i)
1620 LASSERT(req->crq_o[i].ro_obj != NULL);
1621
1622 result = 0;
1623 list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
1624 if (slice->crs_ops->cro_prep != NULL) {
1625 result = slice->crs_ops->cro_prep(env, slice);
1626 if (result != 0)
1627 break;
1628 }
1629 }
1630 RETURN(result);
1631 }
1632 EXPORT_SYMBOL(cl_req_prep);
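/*
 * Editor's note (illustrative sketch, not part of the original file):
 * the transfer-request life cycle as visible from this file alone:
 * allocate, attach pages, prepare, and eventually complete.  Error
 * handling and the actual network send (done by lower layers) are
 * omitted:
 *
 *	req = cl_req_alloc(env, page, CRT_READ, 1);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	cl_req_page_add(env, req, page);
 *	rc = cl_req_prep(env, req);
 *	... transfer happens here ...
 *	cl_req_completion(env, req, rc);
 */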
1633
1634 /**
1635 * Fills in attributes that are passed to the server together with transfer. Only
1636 * attributes from \a flags may be touched. This can be called multiple times
1637 * for the same request.
1638 */
1639 void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
1640 struct cl_req_attr *attr, obd_valid flags)
1641 {
1642 const struct cl_req_slice *slice;
1643 struct cl_page *page;
1644 int i;
1645
1646 LASSERT(!list_empty(&req->crq_pages));
1647 ENTRY;
1648
1649 /* Take any page to use as a model. */
1650 page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);
1651
1652 for (i = 0; i < req->crq_nrobjs; ++i) {
1653 list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
1654 const struct cl_page_slice *scan;
1655 const struct cl_object *obj;
1656
1657 scan = cl_page_at(page,
1658 slice->crs_dev->cd_lu_dev.ld_type);
1659 LASSERT(scan != NULL);
1660 obj = scan->cpl_obj;
1661 if (slice->crs_ops->cro_attr_set != NULL)
1662 slice->crs_ops->cro_attr_set(env, slice, obj,
1663 attr + i, flags);
1664 }
1665 }
1666 EXIT;
1667 }
1668 EXPORT_SYMBOL(cl_req_attr_set);
1669
1670 /* XXX complete(), init_completion(), and wait_for_completion(), until they are
1671 * implemented in libcfs. */
1672 # include <linux/sched.h>
1673
1674 /**
1675 * Initialize synchronous io wait anchor, for transfer of \a nrpages pages.
1676 */
1677 void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
1678 {
1679 ENTRY;
1680 init_waitqueue_head(&anchor->csi_waitq);
1681 atomic_set(&anchor->csi_sync_nr, nrpages);
1682 atomic_set(&anchor->csi_barrier, nrpages > 0);
1683 anchor->csi_sync_rc = 0;
1684 EXIT;
1685 }
1686 EXPORT_SYMBOL(cl_sync_io_init);
1687
1688 /**
1689 * Waits until all transfers complete. The transfer completion routine has to
1690 * call cl_sync_io_note() for every page.
1691 */
1692 int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
1693 struct cl_page_list *queue, struct cl_sync_io *anchor,
1694 long timeout)
1695 {
1696 struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
1697 NULL, NULL, NULL);
1698 int rc;
1699 ENTRY;
1700
1701 LASSERT(timeout >= 0);
1702
1703 rc = l_wait_event(anchor->csi_waitq,
1704 atomic_read(&anchor->csi_sync_nr) == 0,
1705 &lwi);
1706 if (rc < 0) {
1707 CERROR("SYNC IO failed with error: %d, try to cancel "
1708 "%d remaining pages\n",
1709 rc, atomic_read(&anchor->csi_sync_nr));
1710
1711 (void)cl_io_cancel(env, io, queue);
1712
1713 lwi = (struct l_wait_info) { 0 };
1714 (void)l_wait_event(anchor->csi_waitq,
1715 atomic_read(&anchor->csi_sync_nr) == 0,
1716 &lwi);
1717 } else {
1718 rc = anchor->csi_sync_rc;
1719 }
1720 LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
1721 cl_page_list_assume(env, io, queue);
1722
1723 /* wait until cl_sync_io_note() has done wakeup */
1724 while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
1725 cpu_relax();
1726 }
1727
1728 POISON(anchor, 0x5a, sizeof *anchor);
1729 RETURN(rc);
1730 }
1731 EXPORT_SYMBOL(cl_sync_io_wait);
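/*
 * Editor's note (illustrative sketch, not part of the original file):
 * the completion side of the anchor protocol.  A transfer completion
 * routine that finds cp_sync_io set on a page (as arranged by
 * cl_io_submit_sync() above) reports the per-page result back to the
 * waiter:
 *
 *	struct cl_sync_io *anchor = page->cp_sync_io;
 *
 *	if (anchor != NULL) {
 *		page->cp_sync_io = NULL;
 *		cl_sync_io_note(anchor, ioret);
 *	}
 */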
1732
1733 /**
1734 * Indicate that transfer of a single page completed.
1735 */
1736 void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
1737 {
1738 ENTRY;
1739 if (anchor->csi_sync_rc == 0 && ioret < 0)
1740 anchor->csi_sync_rc = ioret;
1741 /*
1742 * Synchronous IO done without releasing page lock (e.g., as a part of
1743 * ->{prepare,commit}_write()). Completion is used to signal the end of
1744 * IO.
1745 */
1746 LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
1747 if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
1748 wake_up_all(&anchor->csi_waitq);
1749 /* it's safe to nuke or reuse anchor now */
1750 atomic_set(&anchor->csi_barrier, 0);
1751 }
1752 EXIT;
1753 }
1754 EXPORT_SYMBOL(cl_sync_io_note);