NFSv4.1: Add helpers for setting/reading the I/O fail bit
fs/nfs/pnfs.c
/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement. The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
			__func__, id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr ? ld->alloc_layout_hdr(ino, gfp_flags) :
		kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(lo->plh_inode)->pnfs_curr_ld;
	put_rpccred(lo->plh_lc_cred);
	return ld->alloc_layout_hdr ? ld->free_layout_hdr(lo) : kfree(lo);
}

static void
destroy_layout_hdr(struct pnfs_layout_hdr *lo)
{
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	BUG_ON(!list_empty(&lo->plh_layouts));
	NFS_I(lo->plh_inode)->layout = NULL;
	pnfs_free_layout_hdr(lo);
}

static void
pnfs_put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
	if (atomic_dec_and_test(&lo->plh_refcount))
		destroy_layout_hdr(lo);
}

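/*
 * Drop a reference to the layout header. ->i_lock is taken only when
 * the refcount is about to hit zero, so destruction always runs under
 * the inode lock.
 */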
void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		destroy_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
	}
}

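/*
 * I/O fail bit helpers (the subject of this patch).
 *
 * When a LAYOUTGET for a given iomode fails, the matching
 * NFS_LAYOUT_{RO,RW}_FAILED bit is set in lo->plh_flags so that later
 * I/O of that iomode falls back to the MDS instead of retrying pNFS.
 * IOMODE_RW maps to the RW bit; anything else maps to the RO bit.
 */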
static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
	return iomode == IOMODE_RW ?
		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	set_bit(pnfs_iomode_to_fail_bit(iomode), &lo->plh_flags);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
		iomode == IOMODE_RW ? "RW" : "READ");
}

static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	return test_bit(pnfs_iomode_to_fail_bit(iomode), &lo->plh_flags) != 0;
}

static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}

static void free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	/* Matched by pnfs_get_layout_hdr in pnfs_insert_layout */
	pnfs_put_layout_hdr(NFS_I(ino)->layout);
}

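/* Must be called with the inode's ->i_lock held. */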
static void
pnfs_put_lseg_common(struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lseg->pls_layout->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	if (list_empty(&lseg->pls_layout->plh_segs)) {
		set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
		/* Matched by initial refcount set in alloc_init_layout_hdr */
		pnfs_put_layout_hdr_locked(lseg->pls_layout);
	}
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

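/*
 * Drop a reference to a layout segment. On the final put the segment
 * is unlinked under ->i_lock and freed outside the lock. Callers own a
 * reference obtained from, e.g., pnfs_update_layout() or pnfs_get_lseg().
 */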
void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	inode = lseg->pls_layout->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		LIST_HEAD(free_me);

		pnfs_put_lseg_common(lseg);
		list_add(&lseg->pls_list, &free_me);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&free_me);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);

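/*
 * Compute start + len, clamping to NFS4_MAX_UINT64 on unsigned
 * overflow so that a length of NFS4_MAX_UINT64 means "to end of file".
 */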
static inline u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}

/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	BUG_ON(!len);
	end = start + len;
	return end > start ? end - 1 : NFS4_MAX_UINT64;
}

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static inline int
lo_seg_contained(struct pnfs_layout_range *l1,
		 struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}

/*
 * are l1 and l2 intersecting?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static inline int
lo_seg_intersecting(struct pnfs_layout_range *l1,
		    struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
}

static bool
should_free_lseg(struct pnfs_layout_range *lseg_range,
		 struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       lo_seg_intersecting(lseg_range, recall_range);
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list. It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (atomic_dec_and_test(&lseg->pls_refcount)) {
			pnfs_put_lseg_common(lseg);
			list_add(&lseg->pls_list, tmp_list);
			rv = 1;
		}
	}
	return rv;
}

/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
				 struct list_head *tmp_list,
				 struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs)) {
		/* Reset MDS Threshold I/O counters */
		NFS_I(lo->plh_inode)->write_io = 0;
		NFS_I(lo->plh_inode)->read_io = 0;
		if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
			pnfs_put_layout_hdr_locked(lo);
		return 0;
	}
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;
	struct pnfs_layout_hdr *lo;

	if (list_empty(free_me))
		return;

	lo = list_first_entry(free_me, struct pnfs_layout_segment,
			      pls_list)->pls_layout;

	if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
		struct nfs_client *clp;

		clp = NFS_SERVER(lo->plh_inode)->nfs_client;
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		free_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
	}
	spin_unlock(&nfsi->vfs_inode.i_lock);
	pnfs_free_lseg_list(&tmp_list);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	struct nfs_server *server;
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (!list_empty(&server->layouts))
			list_splice_init(&server->layouts, &tmp_list);
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	while (!list_empty(&tmp_list)) {
		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
				plh_layouts);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		list_del_init(&lo->plh_layouts);
		pnfs_destroy_layout(NFS_I(lo->plh_inode));
	}
}

/* update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq;

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);
	if ((int)(newseq - oldseq) > 0) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		if (update_barrier) {
			u32 new_barrier = be32_to_cpu(new->seqid);

			if ((int)(new_barrier - lo->plh_barrier) > 0)
				lo->plh_barrier = new_barrier;
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids. It needs to be
			 * within 2**31 to count as "behind", so if it
			 * gets too near that limit, give us a little leeway
			 * and bring it to within 2**30.
			 * NOTE - and yes, this is all unsigned arithmetic.
			 */
			if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
				lo->plh_barrier = newseq - (1 << 30);
		}
	}
}

/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
			int lget)
{
	if ((stateid) &&
	    (int)(lo->plh_barrier - be32_to_cpu(stateid->seqid)) >= 0)
		return true;
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget));
}

int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
		status = -EAGAIN;
	} else if (list_empty(&lo->plh_segs)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			nfs4_stateid_copy(dst, &open_state->stateid);
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		nfs4_stateid_copy(dst, &lo->plh_stateid);
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	       struct nfs_open_context *ctx,
	       struct pnfs_layout_range *range,
	       gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg;

	dprintk("--> %s\n", __func__);

	BUG_ON(ctx == NULL);
	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	lgp->args.minlength = PAGE_CACHE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range = *range;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->gfp_flags = gfp_flags;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	lseg = nfs4_proc_layoutget(lgp, gfp_flags);
	if (IS_ERR(lseg)) {
		switch (PTR_ERR(lseg)) {
		case -ENOMEM:
		case -ERESTARTSYS:
			break;
		default:
			/* remember that LAYOUTGET failed and suspend trying */
			pnfs_layout_io_set_failed(lo, range->iomode);
		}
		return NULL;
	}

	return lseg;
}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	struct nfs4_layoutreturn *lrp;
	nfs4_stateid stateid;
	int status = 0, empty;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo || pnfs_test_layout_returned(lo)) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	stateid = nfsi->layout->plh_stateid;
	/* Reference matched in nfs4_layoutreturn_release */
	pnfs_get_layout_hdr(lo);
	empty = list_empty(&lo->plh_segs);
	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (empty) {
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out;
	}
	lo->plh_block_lgets++;
	pnfs_mark_layout_returned(lo);
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);

	WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));

	lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		pnfs_layout_io_set_failed(lo, IOMODE_RW);
		pnfs_layout_io_set_failed(lo, IOMODE_READ);
		pnfs_clear_layout_returned(lo);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	lrp->args.stateid = stateid;
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->args.layout = lo;
	lrp->clp = NFS_SERVER(ino)->nfs_client;

	status = nfs4_proc_layoutreturn(lrp);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);

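/*
 * Return-on-close: invalidate every valid layout segment that carries
 * NFS_LSEG_ROC. If any were found, block further LAYOUTGETs and take a
 * header reference (matched in pnfs_roc_release) so that the layout
 * can be returned as part of the CLOSE.
 */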
bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	LIST_HEAD(tmp_list);
	bool found = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_nolayout;
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_nolayout;
	lo->plh_block_lgets++;
	pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_nolayout:
	spin_unlock(&ino->i_lock);
	return false;
}

void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	pnfs_put_layout_hdr_locked(lo);
	spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if ((int)(barrier - lo->plh_barrier) > 0)
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}

bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_segment *lseg;
	bool found = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			found = true;
			break;
		}
	if (!found) {
		struct pnfs_layout_hdr *lo = nfsi->layout;
		u32 current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

		/* Since close does not return a layout stateid for use as
		 * a barrier, we choose the worst-case barrier.
		 */
		*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
	}
	spin_unlock(&ino->i_lock);
	return found;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
cmp_layout(struct pnfs_layout_range *l1,
	   struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}

static void
pnfs_insert_layout(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	pnfs_get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_recall);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_rpccred(ctx->state->owner->so_cred);
	return lo;
}

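/*
 * Find the inode's layout header, allocating one if none exists yet.
 * Called with ->i_lock held; the lock is dropped and retaken around the
 * allocation, which is why the "Won the race?" re-check is needed.
 */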
static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	assert_spin_locked(&ino->i_lock);
	if (nfsi->layout) {
		if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
			return NULL;
		else
			return nfsi->layout;
	}
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL))	/* Won the race? */
		nfsi->layout = new;
	else
		pnfs_free_layout_hdr(new);
	return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode	lseg	match
 * ------	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static int
is_matching_lseg(struct pnfs_layout_range *ls_range,
		 struct pnfs_layout_range *range)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    !lo_seg_intersecting(ls_range, range))
		return 0;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return lo_seg_contained(ls_range, &range1);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
	       struct pnfs_layout_range *range)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    is_matching_lseg(&lseg->pls_range, range)) {
			ret = pnfs_get_lseg(lseg);
			break;
		}
		if (lseg->pls_range.offset > range->offset)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}

/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server. If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server. If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
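/*
 * Worked example (hypothetical numbers): with THRESHOLD_RD set and
 * rd_sz = 1 MB, a read of a 100 KB file is below the size threshold,
 * so this returns true and the read goes to the MDS rather than pNFS.
 */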
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}
	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
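/*
 * On success the caller holds a reference to the returned segment and
 * must release it with pnfs_put_lseg().
 */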
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first = false;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		goto out;

	if (pnfs_within_mdsthreshold(ctx, ino, iomode))
		goto out;

	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL)
		goto out_unlock;

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (pnfs_layout_io_test_failed(lo, iomode))
		goto out_unlock;

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, &arg);
	if (lseg)
		goto out_unlock;

	if (pnfs_layoutgets_blocked(lo, NULL, 0))
		goto out_unlock;
	atomic_inc(&lo->plh_outstanding);

	pnfs_get_layout_hdr(lo);
	if (list_empty(&lo->plh_segs))
		first = true;

	/* Enable LAYOUTRETURNs */
	pnfs_clear_layout_returned(lo);

	spin_unlock(&ino->i_lock);
	if (first) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		BUG_ON(!list_empty(&lo->plh_layouts));
		list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}

	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_CACHE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
	if (!lseg && first) {
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	atomic_dec(&lo->plh_outstanding);
	pnfs_put_layout_hdr(lo);
out:
	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
		"(%s, offset: %llu, length: %llu)\n",
		__func__, ino->i_sb->s_id,
		(unsigned long long)NFS_FILEID(ino),
		lseg == NULL ? "not found" : "found",
		iomode == IOMODE_RW ? "read/write" : "read-only",
		(unsigned long long)pos,
		(unsigned long long)count);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);

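/*
 * Called on the LAYOUTGET reply path: have the layout driver turn the
 * opaque layout body in lgp->res into a layout segment and insert it
 * into the layout cache, unless a recall arrived in the meantime, in
 * which case the reply is "forgotten" and the segment freed.
 */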
struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	int status = 0;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
			__func__, status);
		goto out;
	}

	spin_lock(&ino->i_lock);
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}
	init_lseg(lo, lseg);
	lseg->pls_range = res->range;
	pnfs_get_lseg(lseg);
	pnfs_insert_layout(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid, false);
	spin_unlock(&ino->i_lock);
	return lseg;
out:
	return ERR_PTR(status);

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}

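/*
 * Pageio init hooks: request a layout segment covering the nfs_page and
 * fall back to the MDS when none can be obtained. Requests that do not
 * start at the page base are sent straight to the MDS.
 */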
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	BUG_ON(pgio->pg_lseg != NULL);

	if (req->wb_offset != req->wb_pgbase) {
		nfs_pageio_reset_read_mds(pgio);
		return;
	}
	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   req->wb_bytes,
					   IOMODE_READ,
					   GFP_KERNEL);
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	BUG_ON(pgio->pg_lseg != NULL);

	if (req->wb_offset != req->wb_pgbase) {
		nfs_pageio_reset_write_mds(pgio);
		return;
	}
	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   req->wb_bytes,
					   IOMODE_RW,
					   GFP_NOFS);
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);

void
pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
		      const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (ld == NULL)
		nfs_pageio_init_read(pgio, inode, compl_ops);
	else
		nfs_pageio_init(pgio, inode, ld->pg_read_ops, compl_ops, server->rsize, 0);
}

void
pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode,
		       int ioflags,
		       const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (ld == NULL)
		nfs_pageio_init_write(pgio, inode, ioflags, compl_ops);
	else
		nfs_pageio_init(pgio, inode, ld->pg_write_ops, compl_ops, server->wsize, ioflags);
}

bool
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		     struct nfs_page *req)
{
	if (pgio->pg_lseg == NULL)
		return nfs_generic_pg_test(pgio, prev, req);

	/*
	 * Test if a nfs_page is fully contained in the pnfs_layout_range.
	 * Note that this test makes several assumptions:
	 * - that the previous nfs_page in the struct nfs_pageio_descriptor
	 *   is known to lie within the range.
	 * - that the nfs_page being tested is known to be contiguous with the
	 *   previous nfs_page.
	 * - Layout ranges are page aligned, so we only have to test the
	 *   start offset of the request.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 *
	 */
	return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
					    pgio->pg_lseg->pls_range.length);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);

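/*
 * Requeue every request on @head through a fresh MDS pageio descriptor.
 * Requests that cannot be re-added are moved back to @head so the
 * caller can fail them; returns -EIO in that case, 0 otherwise.
 */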
int pnfs_write_done_resend_to_mds(struct inode *inode,
				  struct list_head *head,
				  const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_pageio_descriptor pgio;
	LIST_HEAD(failed);

	/* Resend all requests through the MDS */
	nfs_pageio_init_write(&pgio, inode, FLUSH_STABLE, compl_ops);
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(&pgio, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(&pgio);

	if (!list_empty(&failed)) {
		/* For some reason our attempt to resend pages failed. Mark
		 * the overall send request as having failed, and let
		 * nfs_writeback_release_full deal with the error.
		 */
		list_move(&failed, head);
		return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);

static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
							&hdr->pages,
							hdr->completion_ops);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_write_done(struct nfs_write_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (!hdr->pnfs_error) {
		pnfs_set_layoutcommit(data);
		hdr->mds_ops->rpc_call_done(&data->task, data);
	} else
		pnfs_ld_handle_write_error(data);
	hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);

static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		       struct nfs_write_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &desc->pg_list);
		nfs_pageio_reset_write_mds(desc);
		desc->pg_recoalesce = 1;
	}
	nfs_writedata_release(data);
}

static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_write_data *wdata,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg,
		       int how)
{
	struct nfs_pgio_header *hdr = wdata->header;
	struct inode *inode = hdr->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	hdr->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, wdata->args.count, wdata->args.offset, how);
	trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

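/*
 * Hand each nfs_write_data on @head to the layout driver; anything the
 * driver declines (PNFS_NOT_ATTEMPTED) is requeued through the MDS.
 */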
static void
pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *head, int how)
{
	struct nfs_write_data *data;
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;

	desc->pg_lseg = NULL;
	while (!list_empty(head)) {
		enum pnfs_try_status trypnfs;

		data = list_first_entry(head, struct nfs_write_data, list);
		list_del_init(&data->list);

		trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
		if (trypnfs == PNFS_NOT_ATTEMPTED)
			pnfs_write_through_mds(desc, data);
	}
	pnfs_put_lseg(lseg);
}

static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_writehdr_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_writehdr_free);

int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_write_header *whdr;
	struct nfs_pgio_header *hdr;
	int ret;

	whdr = nfs_writehdr_alloc();
	if (!whdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return -ENOMEM;
	}
	hdr = &whdr->header;
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	atomic_inc(&hdr->refcnt);
	ret = nfs_generic_flush(desc, hdr);
	if (ret != 0) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_multiple_writes(desc, &hdr->rpc_list, desc->pg_ioflags);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);

int pnfs_read_done_resend_to_mds(struct inode *inode,
				 struct list_head *head,
				 const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_pageio_descriptor pgio;
	LIST_HEAD(failed);

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, inode, compl_ops);
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(&pgio, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(&pgio);

	if (!list_empty(&failed)) {
		list_move(&failed, head);
		return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
							&hdr->pages,
							hdr->completion_ops);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_read_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (likely(!hdr->pnfs_error)) {
		__nfs4_read_done_cb(data);
		hdr->mds_ops->rpc_call_done(&data->task, data);
	} else
		pnfs_ld_handle_read_error(data);
	hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		      struct nfs_read_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &desc->pg_list);
		nfs_pageio_reset_read_mds(desc);
		desc->pg_recoalesce = 1;
	}
	nfs_readdata_release(data);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_read_data *rdata,
		      const struct rpc_call_ops *call_ops,
		      struct pnfs_layout_segment *lseg)
{
	struct nfs_pgio_header *hdr = rdata->header;
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, rdata->args.count, rdata->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

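/* Read-side counterpart of pnfs_do_multiple_writes() above. */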
static void
pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *head)
{
	struct nfs_read_data *data;
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;

	desc->pg_lseg = NULL;
	while (!list_empty(head)) {
		enum pnfs_try_status trypnfs;

		data = list_first_entry(head, struct nfs_read_data, list);
		list_del_init(&data->list);

		trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
		if (trypnfs == PNFS_NOT_ATTEMPTED)
			pnfs_read_through_mds(desc, data);
	}
	pnfs_put_lseg(lseg);
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_readhdr_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_readhdr_free);

int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_read_header *rhdr;
	struct nfs_pgio_header *hdr;
	int ret;

	rhdr = nfs_readhdr_alloc();
	if (!rhdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		ret = -ENOMEM;
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return ret;
	}
	hdr = &rhdr->header;
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	atomic_inc(&hdr->refcnt);
	ret = nfs_generic_pagein(desc, hdr);
	if (ret != 0) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_multiple_reads(desc, &hdr->rpc_list);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);

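/*
 * Record that this WRITE will need a LAYOUTCOMMIT: mark the inode, pin
 * the layout segment until nfs4_layoutcommit_release, and track the
 * last write byte (plh_lwb) so LAYOUTCOMMIT can set lastbytewritten.
 */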
void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
	struct nfs_pgio_header *hdr = wdata->header;
	struct inode *inode = hdr->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos = wdata->mds_offset + wdata->res.count;
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	}
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(hdr->lseg);
	}
	if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, hdr->lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
}

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status = 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return 0;

	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data) {
		status = -ENOMEM;
		goto out;
	}

	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_free;

	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync) {
			status = -EAGAIN;
			goto out_free;
		}
		status = wait_on_bit_lock(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING,
					  nfs_wait_bit_killable, TASK_KILLABLE);
		if (status)
			goto out_free;
	}

	INIT_LIST_HEAD(&data->lseg_list);
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		clear_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags);
		spin_unlock(&inode->i_lock);
		wake_up_bit(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING);
		goto out_free;
	}

	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;
	nfsi->layout->plh_lwb = 0;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_free:
	kfree(data);
	goto out;
}

struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (!thp) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}