pnfs: align layoutget requests on page boundaries
fs/nfs/pnfs.c
1 /*
2 * pNFS functions to call and manage layout drivers.
3 *
4 * Copyright (c) 2002 [year of first publication]
5 * The Regents of the University of Michigan
6 * All Rights Reserved
7 *
8 * Dean Hildebrand <dhildebz@umich.edu>
9 *
10 * Permission is granted to use, copy, create derivative works, and
11 * redistribute this software and such derivative works for any purpose,
12 * so long as the name of the University of Michigan is not used in
13 * any advertising or publicity pertaining to the use or distribution
14 * of this software without specific, written prior authorization. If
15 * the above copyright notice or any other identification of the
16 * University of Michigan is included in any copy of any portion of
17 * this software, then the disclaimer below must also be included.
18 *
19 * This software is provided as is, without representation or warranty
20 * of any kind either express or implied, including without limitation
21 * the implied warranties of merchantability, fitness for a particular
22 * purpose, or noninfringement. The Regents of the University of
23 * Michigan shall not be liable for any damages, including special,
24 * indirect, incidental, or consequential damages, with respect to any
25 * claim arising out of or in connection with the use of the software,
26 * even if it has been or is hereafter advised of the possibility of
27 * such damages.
28 */
29
30 #include <linux/nfs_fs.h>
31 #include "internal.h"
32 #include "pnfs.h"
33 #include "iostat.h"
34
35 #define NFSDBG_FACILITY NFSDBG_PNFS
36
37 /* Locking:
38 *
39 * pnfs_spinlock:
40 * protects pnfs_modules_tbl.
41 */
42 static DEFINE_SPINLOCK(pnfs_spinlock);
43
44 /*
45 * pnfs_modules_tbl holds all pnfs modules
46 */
47 static LIST_HEAD(pnfs_modules_tbl);
48
49 /* Return the registered pnfs layout driver module matching given id */
50 static struct pnfs_layoutdriver_type *
51 find_pnfs_driver_locked(u32 id)
52 {
53 struct pnfs_layoutdriver_type *local;
54
55 list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
56 if (local->id == id)
57 goto out;
58 local = NULL;
59 out:
60 dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
61 return local;
62 }
63
64 static struct pnfs_layoutdriver_type *
65 find_pnfs_driver(u32 id)
66 {
67 struct pnfs_layoutdriver_type *local;
68
69 spin_lock(&pnfs_spinlock);
70 local = find_pnfs_driver_locked(id);
71 spin_unlock(&pnfs_spinlock);
72 return local;
73 }
74
75 void
76 unset_pnfs_layoutdriver(struct nfs_server *nfss)
77 {
78 if (nfss->pnfs_curr_ld)
79 module_put(nfss->pnfs_curr_ld->owner);
80 nfss->pnfs_curr_ld = NULL;
81 }
82
83 /*
84 * Try to set the server's pnfs module to the pnfs layout type specified by id.
85 * Currently only one pNFS layout driver per filesystem is supported.
86 *
87 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
88 */
89 void
90 set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
91 {
92 struct pnfs_layoutdriver_type *ld_type = NULL;
93
94 if (id == 0)
95 goto out_no_driver;
96 if (!(server->nfs_client->cl_exchange_flags &
97 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
98 printk(KERN_ERR "%s: id %u cl_exchange_flags 0x%x\n", __func__,
99 id, server->nfs_client->cl_exchange_flags);
100 goto out_no_driver;
101 }
102 ld_type = find_pnfs_driver(id);
103 if (!ld_type) {
104 request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
105 ld_type = find_pnfs_driver(id);
106 if (!ld_type) {
107 dprintk("%s: No pNFS module found for %u.\n",
108 __func__, id);
109 goto out_no_driver;
110 }
111 }
112 if (!try_module_get(ld_type->owner)) {
113 dprintk("%s: Could not grab reference on module\n", __func__);
114 goto out_no_driver;
115 }
116 server->pnfs_curr_ld = ld_type;
117
118 dprintk("%s: pNFS module for %u set\n", __func__, id);
119 return;
120
121 out_no_driver:
122 dprintk("%s: Using NFSv4 I/O\n", __func__);
123 server->pnfs_curr_ld = NULL;
124 }
125
126 int
127 pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
128 {
129 int status = -EINVAL;
130 struct pnfs_layoutdriver_type *tmp;
131
132 if (ld_type->id == 0) {
133 printk(KERN_ERR "%s id 0 is reserved\n", __func__);
134 return status;
135 }
136 if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
137 printk(KERN_ERR "%s Layout driver must provide "
138 "alloc_lseg and free_lseg.\n", __func__);
139 return status;
140 }
141
142 spin_lock(&pnfs_spinlock);
143 tmp = find_pnfs_driver_locked(ld_type->id);
144 if (!tmp) {
145 list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
146 status = 0;
147 dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
148 ld_type->name);
149 } else {
150 printk(KERN_ERR "%s Module with id %d already loaded!\n",
151 __func__, ld_type->id);
152 }
153 spin_unlock(&pnfs_spinlock);
154
155 return status;
156 }
157 EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
158
159 void
160 pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
161 {
162 dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
163 spin_lock(&pnfs_spinlock);
164 list_del(&ld_type->pnfs_tblid);
165 spin_unlock(&pnfs_spinlock);
166 }
167 EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
168
169 /*
170 * pNFS client layout cache
171 */
172
173 /* Need to hold i_lock if caller does not already hold reference */
174 void
175 get_layout_hdr(struct pnfs_layout_hdr *lo)
176 {
177 atomic_inc(&lo->plh_refcount);
178 }
179
180 static void
181 destroy_layout_hdr(struct pnfs_layout_hdr *lo)
182 {
183 dprintk("%s: freeing layout cache %p\n", __func__, lo);
184 BUG_ON(!list_empty(&lo->plh_layouts));
185 NFS_I(lo->plh_inode)->layout = NULL;
186 kfree(lo);
187 }
188
189 static void
190 put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
191 {
192 if (atomic_dec_and_test(&lo->plh_refcount))
193 destroy_layout_hdr(lo);
194 }
195
196 void
197 put_layout_hdr(struct pnfs_layout_hdr *lo)
198 {
199 struct inode *inode = lo->plh_inode;
200
201 if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
202 destroy_layout_hdr(lo);
203 spin_unlock(&inode->i_lock);
204 }
205 }
206
207 static void
208 init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
209 {
210 INIT_LIST_HEAD(&lseg->pls_list);
211 atomic_set(&lseg->pls_refcount, 1);
212 smp_mb();
213 set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
214 lseg->pls_layout = lo;
215 }
216
217 static void free_lseg(struct pnfs_layout_segment *lseg)
218 {
219 struct inode *ino = lseg->pls_layout->plh_inode;
220
221 NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
222 /* Matched by get_layout_hdr in pnfs_insert_layout */
223 put_layout_hdr(NFS_I(ino)->layout);
224 }
225
226 static void
227 put_lseg_common(struct pnfs_layout_segment *lseg)
228 {
229 struct inode *inode = lseg->pls_layout->plh_inode;
230
231 BUG_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
232 list_del_init(&lseg->pls_list);
233 if (list_empty(&lseg->pls_layout->plh_segs)) {
234 set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
235 /* Matched by initial refcount set in alloc_init_layout_hdr */
236 put_layout_hdr_locked(lseg->pls_layout);
237 }
238 rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
239 }
240
241 void
242 put_lseg(struct pnfs_layout_segment *lseg)
243 {
244 struct inode *inode;
245
246 if (!lseg)
247 return;
248
249 dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
250 atomic_read(&lseg->pls_refcount),
251 test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
252 inode = lseg->pls_layout->plh_inode;
253 if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
254 LIST_HEAD(free_me);
255
256 put_lseg_common(lseg);
257 list_add(&lseg->pls_list, &free_me);
258 spin_unlock(&inode->i_lock);
259 pnfs_free_lseg_list(&free_me);
260 }
261 }
262 EXPORT_SYMBOL_GPL(put_lseg);
263
264 static inline u64
265 end_offset(u64 start, u64 len)
266 {
267 u64 end;
268
269 end = start + len;
270 return end >= start ? end : NFS4_MAX_UINT64;
271 }
272
273 /* last octet in a range */
274 static inline u64
275 last_byte_offset(u64 start, u64 len)
276 {
277 u64 end;
278
279 BUG_ON(!len);
280 end = start + len;
281 return end > start ? end - 1 : NFS4_MAX_UINT64;
282 }
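/*
 * Worked example (illustrative): with start = 4096 and len = 8192,
 * end_offset() returns 12288 (the first byte past the range) while
 * last_byte_offset() returns 12287 (the last byte inside it). When
 * start + len overflows u64, both saturate to NFS4_MAX_UINT64, which
 * stands for "through end of file".
 */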
283
284 /*
285 * is l2 fully contained in l1?
286 * start1 end1
287 * [----------------------------------)
288 * start2 end2
289 * [----------------)
290 */
291 static inline int
292 lo_seg_contained(struct pnfs_layout_range *l1,
293 struct pnfs_layout_range *l2)
294 {
295 u64 start1 = l1->offset;
296 u64 end1 = end_offset(start1, l1->length);
297 u64 start2 = l2->offset;
298 u64 end2 = end_offset(start2, l2->length);
299
300 return (start1 <= start2) && (end1 >= end2);
301 }
302
303 /*
304 * do l1 and l2 intersect?
305 * start1 end1
306 * [----------------------------------)
307 * start2 end2
308 * [----------------)
309 */
310 static inline int
311 lo_seg_intersecting(struct pnfs_layout_range *l1,
312 struct pnfs_layout_range *l2)
313 {
314 u64 start1 = l1->offset;
315 u64 end1 = end_offset(start1, l1->length);
316 u64 start2 = l2->offset;
317 u64 end2 = end_offset(start2, l2->length);
318
319 return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
320 (end2 == NFS4_MAX_UINT64 || end2 > start1);
321 }
322
323 static bool
324 should_free_lseg(u32 lseg_iomode, u32 recall_iomode)
325 {
326 return (recall_iomode == IOMODE_ANY ||
327 lseg_iomode == recall_iomode);
328 }
329
330 /* Returns 1 if lseg is removed from list, 0 otherwise */
331 static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
332 struct list_head *tmp_list)
333 {
334 int rv = 0;
335
336 if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
337 /* Remove the reference keeping the lseg in the
338 * list. It will now be removed when all
339 * outstanding io is finished.
340 */
341 dprintk("%s: lseg %p ref %d\n", __func__, lseg,
342 atomic_read(&lseg->pls_refcount));
343 if (atomic_dec_and_test(&lseg->pls_refcount)) {
344 put_lseg_common(lseg);
345 list_add(&lseg->pls_list, tmp_list);
346 rv = 1;
347 }
348 }
349 return rv;
350 }
351
352 /* Returns the number of matching invalid lsegs remaining in the list
353 * after the call.
354 */
355 int
356 mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
357 struct list_head *tmp_list,
358 u32 iomode)
359 {
360 struct pnfs_layout_segment *lseg, *next;
361 int invalid = 0, removed = 0;
362
363 dprintk("%s:Begin lo %p\n", __func__, lo);
364
365 if (list_empty(&lo->plh_segs)) {
366 if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
367 put_layout_hdr_locked(lo);
368 return 0;
369 }
370 list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
371 if (should_free_lseg(lseg->pls_range.iomode, iomode)) {
372 dprintk("%s: freeing lseg %p iomode %d "
373 "offset %llu length %llu\n", __func__,
374 lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
375 lseg->pls_range.length);
376 invalid++;
377 removed += mark_lseg_invalid(lseg, tmp_list);
378 }
379 dprintk("%s:Return %i\n", __func__, invalid - removed);
380 return invalid - removed;
381 }
382
383 /* note free_me must contain lsegs from a single layout_hdr */
384 void
385 pnfs_free_lseg_list(struct list_head *free_me)
386 {
387 struct pnfs_layout_segment *lseg, *tmp;
388 struct pnfs_layout_hdr *lo;
389
390 if (list_empty(free_me))
391 return;
392
393 lo = list_first_entry(free_me, struct pnfs_layout_segment,
394 pls_list)->pls_layout;
395
396 if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
397 struct nfs_client *clp;
398
399 clp = NFS_SERVER(lo->plh_inode)->nfs_client;
400 spin_lock(&clp->cl_lock);
401 list_del_init(&lo->plh_layouts);
402 spin_unlock(&clp->cl_lock);
403 }
404 list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
405 list_del(&lseg->pls_list);
406 free_lseg(lseg);
407 }
408 }
409
410 void
411 pnfs_destroy_layout(struct nfs_inode *nfsi)
412 {
413 struct pnfs_layout_hdr *lo;
414 LIST_HEAD(tmp_list);
415
416 spin_lock(&nfsi->vfs_inode.i_lock);
417 lo = nfsi->layout;
418 if (lo) {
419 lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
420 mark_matching_lsegs_invalid(lo, &tmp_list, IOMODE_ANY);
421 }
422 spin_unlock(&nfsi->vfs_inode.i_lock);
423 pnfs_free_lseg_list(&tmp_list);
424 }
425
426 /*
427 * Called by the state manager to remove all layouts established under an
428 * expired lease.
429 */
430 void
431 pnfs_destroy_all_layouts(struct nfs_client *clp)
432 {
433 struct pnfs_layout_hdr *lo;
434 LIST_HEAD(tmp_list);
435
436 spin_lock(&clp->cl_lock);
437 list_splice_init(&clp->cl_layouts, &tmp_list);
438 spin_unlock(&clp->cl_lock);
439
440 while (!list_empty(&tmp_list)) {
441 lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
442 plh_layouts);
443 dprintk("%s freeing layout for inode %lu\n", __func__,
444 lo->plh_inode->i_ino);
445 list_del_init(&lo->plh_layouts);
446 pnfs_destroy_layout(NFS_I(lo->plh_inode));
447 }
448 }
449
450 /* update lo->plh_stateid with new if it is more recent */
451 void
452 pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
453 bool update_barrier)
454 {
455 u32 oldseq, newseq;
456
457 oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
458 newseq = be32_to_cpu(new->stateid.seqid);
459 if ((int)(newseq - oldseq) > 0) {
460 memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
461 if (update_barrier) {
462 u32 new_barrier = be32_to_cpu(new->stateid.seqid);
463
464 if ((int)(new_barrier - lo->plh_barrier))
465 lo->plh_barrier = new_barrier;
466 } else {
467 /* Because of wraparound, we want to keep the barrier
468 * "close" to the current seqids. It needs to be
469 * within 2**31 to count as "behind", so if it
470 * gets too near that limit, give us a little leeway
471 * and bring it to within 2**30.
472 * NOTE - and yes, this is all unsigned arithmetic.
473 */
474 if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
475 lo->plh_barrier = newseq - (1 << 30);
476 }
477 }
478 }
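/*
 * Worked example (illustrative): if oldseq = 0xfffffffe and newseq = 3,
 * then (int)(newseq - oldseq) = 5 > 0, so the new stateid is accepted
 * even though the sequence number has wrapped. And if the cached barrier
 * has fallen more than 3 * 2^29 behind newseq, it is pulled forward to
 * newseq - 2^30 so that it stays within the signed-comparison window.
 */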
479
480 /* lget is set to 1 if called from inside send_layoutget call chain */
481 static bool
482 pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
483 int lget)
484 {
485 if ((stateid) &&
486 (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
487 return true;
488 return lo->plh_block_lgets ||
489 test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
490 test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
491 (list_empty(&lo->plh_segs) &&
492 (atomic_read(&lo->plh_outstanding) > lget));
493 }
494
495 int
496 pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
497 struct nfs4_state *open_state)
498 {
499 int status = 0;
500
501 dprintk("--> %s\n", __func__);
502 spin_lock(&lo->plh_inode->i_lock);
503 if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
504 status = -EAGAIN;
505 } else if (list_empty(&lo->plh_segs)) {
506 int seq;
507
508 do {
509 seq = read_seqbegin(&open_state->seqlock);
510 memcpy(dst->data, open_state->stateid.data,
511 sizeof(open_state->stateid.data));
512 } while (read_seqretry(&open_state->seqlock, seq));
513 } else
514 memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data));
515 spin_unlock(&lo->plh_inode->i_lock);
516 dprintk("<-- %s\n", __func__);
517 return status;
518 }
519
520 /*
521 * Get layout from server.
522 * for now, assume that whole file layouts are requested.
523 * arg->offset: 0
524 * arg->length: all ones
525 */
526 static struct pnfs_layout_segment *
527 send_layoutget(struct pnfs_layout_hdr *lo,
528 struct nfs_open_context *ctx,
529 struct pnfs_layout_range *range,
530 gfp_t gfp_flags)
531 {
532 struct inode *ino = lo->plh_inode;
533 struct nfs_server *server = NFS_SERVER(ino);
534 struct nfs4_layoutget *lgp;
535 struct pnfs_layout_segment *lseg = NULL;
536 struct page **pages = NULL;
537 int i;
538 u32 max_resp_sz, max_pages;
539
540 dprintk("--> %s\n", __func__);
541
542 BUG_ON(ctx == NULL);
543 lgp = kzalloc(sizeof(*lgp), gfp_flags);
544 if (lgp == NULL)
545 return NULL;
546
547 /* allocate pages for xdr post processing */
548 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
549 max_pages = max_resp_sz >> PAGE_SHIFT;
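/*
 * Example (illustrative, assuming 4 KiB pages): a negotiated max_resp_sz
 * of 1 MiB yields max_pages = 256, i.e. enough page pointers to hold the
 * largest LAYOUTGET reply the session allows.
 */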
550
551 pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
552 if (!pages)
553 goto out_err_free;
554
555 for (i = 0; i < max_pages; i++) {
556 pages[i] = alloc_page(gfp_flags);
557 if (!pages[i])
558 goto out_err_free;
559 }
560
561 lgp->args.minlength = PAGE_CACHE_SIZE;
562 if (lgp->args.minlength > range->length)
563 lgp->args.minlength = range->length;
564 lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
565 lgp->args.range = *range;
566 lgp->args.type = server->pnfs_curr_ld->id;
567 lgp->args.inode = ino;
568 lgp->args.ctx = get_nfs_open_context(ctx);
569 lgp->args.layout.pages = pages;
570 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
571 lgp->lsegpp = &lseg;
572 lgp->gfp_flags = gfp_flags;
573
574 /* Synchronously retrieve layout information from server and
575 * store in lseg.
576 */
577 nfs4_proc_layoutget(lgp);
578 if (!lseg) {
579 /* remember that LAYOUTGET failed and suspend trying */
580 set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
581 }
582
583 /* free xdr pages */
584 for (i = 0; i < max_pages; i++)
585 __free_page(pages[i]);
586 kfree(pages);
587
588 return lseg;
589
590 out_err_free:
591 /* free any allocated xdr pages, and free lgp since it was never used */
592 if (pages) {
593 for (i = 0; i < max_pages; i++) {
594 if (!pages[i])
595 break;
596 __free_page(pages[i]);
597 }
598 kfree(pages);
599 }
600 kfree(lgp);
601 return NULL;
602 }
603
604 bool pnfs_roc(struct inode *ino)
605 {
606 struct pnfs_layout_hdr *lo;
607 struct pnfs_layout_segment *lseg, *tmp;
608 LIST_HEAD(tmp_list);
609 bool found = false;
610
611 spin_lock(&ino->i_lock);
612 lo = NFS_I(ino)->layout;
613 if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
614 test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
615 goto out_nolayout;
616 list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
617 if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
618 mark_lseg_invalid(lseg, &tmp_list);
619 found = true;
620 }
621 if (!found)
622 goto out_nolayout;
623 lo->plh_block_lgets++;
624 get_layout_hdr(lo); /* matched in pnfs_roc_release */
625 spin_unlock(&ino->i_lock);
626 pnfs_free_lseg_list(&tmp_list);
627 return true;
628
629 out_nolayout:
630 spin_unlock(&ino->i_lock);
631 return false;
632 }
633
634 void pnfs_roc_release(struct inode *ino)
635 {
636 struct pnfs_layout_hdr *lo;
637
638 spin_lock(&ino->i_lock);
639 lo = NFS_I(ino)->layout;
640 lo->plh_block_lgets--;
641 put_layout_hdr_locked(lo);
642 spin_unlock(&ino->i_lock);
643 }
644
645 void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
646 {
647 struct pnfs_layout_hdr *lo;
648
649 spin_lock(&ino->i_lock);
650 lo = NFS_I(ino)->layout;
651 if ((int)(barrier - lo->plh_barrier) > 0)
652 lo->plh_barrier = barrier;
653 spin_unlock(&ino->i_lock);
654 }
655
656 bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
657 {
658 struct nfs_inode *nfsi = NFS_I(ino);
659 struct pnfs_layout_segment *lseg;
660 bool found = false;
661
662 spin_lock(&ino->i_lock);
663 list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
664 if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
665 found = true;
666 break;
667 }
668 if (!found) {
669 struct pnfs_layout_hdr *lo = nfsi->layout;
670 u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid);
671
672 /* Since close does not return a layout stateid for use as
673 * a barrier, we choose the worst-case barrier.
674 */
675 *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
676 }
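/*
 * Example (illustrative): with current_seqid = 7 and two LAYOUTGETs still
 * outstanding, the barrier is set to 9, covering any stateid sequence
 * number those in-flight requests could still return.
 */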
677 spin_unlock(&ino->i_lock);
678 return found;
679 }
680
681 /*
682 * Compare two layout segments for sorting into layout cache.
683 * We want to preferentially return RW over RO layouts, so ensure those
684 * are seen first.
685 */
686 static s64
687 cmp_layout(struct pnfs_layout_range *l1,
688 struct pnfs_layout_range *l2)
689 {
690 s64 d;
691
692 /* high offset > low offset */
693 d = l1->offset - l2->offset;
694 if (d)
695 return d;
696
697 /* short length > long length */
698 d = l2->length - l1->length;
699 if (d)
700 return d;
701
702 /* read > read/write */
703 return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
704 }
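/*
 * Example (illustrative): two whole-file segments at offset 0 with the
 * same length sort with the IOMODE_RW one ahead of the IOMODE_READ one,
 * so lookups prefer the RW layout; otherwise lower offsets sort first
 * and, at equal offsets, longer segments sort first.
 */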
705
706 static void
707 pnfs_insert_layout(struct pnfs_layout_hdr *lo,
708 struct pnfs_layout_segment *lseg)
709 {
710 struct pnfs_layout_segment *lp;
711
712 dprintk("%s:Begin\n", __func__);
713
714 assert_spin_locked(&lo->plh_inode->i_lock);
715 list_for_each_entry(lp, &lo->plh_segs, pls_list) {
716 if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
717 continue;
718 list_add_tail(&lseg->pls_list, &lp->pls_list);
719 dprintk("%s: inserted lseg %p "
720 "iomode %d offset %llu length %llu before "
721 "lp %p iomode %d offset %llu length %llu\n",
722 __func__, lseg, lseg->pls_range.iomode,
723 lseg->pls_range.offset, lseg->pls_range.length,
724 lp, lp->pls_range.iomode, lp->pls_range.offset,
725 lp->pls_range.length);
726 goto out;
727 }
728 list_add_tail(&lseg->pls_list, &lo->plh_segs);
729 dprintk("%s: inserted lseg %p "
730 "iomode %d offset %llu length %llu at tail\n",
731 __func__, lseg, lseg->pls_range.iomode,
732 lseg->pls_range.offset, lseg->pls_range.length);
733 out:
734 get_layout_hdr(lo);
735
736 dprintk("%s:Return\n", __func__);
737 }
738
739 static struct pnfs_layout_hdr *
740 alloc_init_layout_hdr(struct inode *ino, gfp_t gfp_flags)
741 {
742 struct pnfs_layout_hdr *lo;
743
744 lo = kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
745 if (!lo)
746 return NULL;
747 atomic_set(&lo->plh_refcount, 1);
748 INIT_LIST_HEAD(&lo->plh_layouts);
749 INIT_LIST_HEAD(&lo->plh_segs);
750 INIT_LIST_HEAD(&lo->plh_bulk_recall);
751 lo->plh_inode = ino;
752 return lo;
753 }
754
755 static struct pnfs_layout_hdr *
756 pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags)
757 {
758 struct nfs_inode *nfsi = NFS_I(ino);
759 struct pnfs_layout_hdr *new = NULL;
760
761 dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
762
763 assert_spin_locked(&ino->i_lock);
764 if (nfsi->layout) {
765 if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
766 return NULL;
767 else
768 return nfsi->layout;
769 }
770 spin_unlock(&ino->i_lock);
771 new = alloc_init_layout_hdr(ino, gfp_flags);
772 spin_lock(&ino->i_lock);
773
774 if (likely(nfsi->layout == NULL)) /* Won the race? */
775 nfsi->layout = new;
776 else
777 kfree(new);
778 return nfsi->layout;
779 }
780
781 /*
782 * iomode matching rules:
783 * iomode lseg match
784 * ----- ----- -----
785 * ANY READ true
786 * ANY RW true
787 * RW READ false
788 * RW RW true
789 * READ READ true
790 * READ RW true
791 */
792 static int
793 is_matching_lseg(struct pnfs_layout_range *ls_range,
794 struct pnfs_layout_range *range)
795 {
796 struct pnfs_layout_range range1;
797
798 if ((range->iomode == IOMODE_RW &&
799 ls_range->iomode != IOMODE_RW) ||
800 !lo_seg_intersecting(ls_range, range))
801 return 0;
802
803 /* range1 covers only the first byte in the range */
804 range1 = *range;
805 range1.length = 1;
806 return lo_seg_contained(ls_range, &range1);
807 }
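/*
 * Example (illustrative): a cached IOMODE_READ lseg covering [0, 16384)
 * matches a read request for [8192, 8192 + 1048576) because the two
 * ranges intersect and the lseg contains the request's first byte; the
 * same lseg never matches an IOMODE_RW request.
 */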
808
809 /*
810 * lookup range in layout
811 */
812 static struct pnfs_layout_segment *
813 pnfs_find_lseg(struct pnfs_layout_hdr *lo,
814 struct pnfs_layout_range *range)
815 {
816 struct pnfs_layout_segment *lseg, *ret = NULL;
817
818 dprintk("%s:Begin\n", __func__);
819
820 assert_spin_locked(&lo->plh_inode->i_lock);
821 list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
822 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
823 is_matching_lseg(&lseg->pls_range, range)) {
824 ret = get_lseg(lseg);
825 break;
826 }
827 if (cmp_layout(range, &lseg->pls_range) > 0)
828 break;
829 }
830
831 dprintk("%s:Return lseg %p ref %d\n",
832 __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
833 return ret;
834 }
835
836 /*
837 * Layout segment is retrieved from the server if not cached.
838 * The appropriate layout segment is referenced and returned to the caller.
839 */
840 struct pnfs_layout_segment *
841 pnfs_update_layout(struct inode *ino,
842 struct nfs_open_context *ctx,
843 loff_t pos,
844 u64 count,
845 enum pnfs_iomode iomode,
846 gfp_t gfp_flags)
847 {
848 struct pnfs_layout_range arg = {
849 .iomode = iomode,
850 .offset = pos,
851 .length = count,
852 };
853 unsigned pg_offset;
854 struct nfs_inode *nfsi = NFS_I(ino);
855 struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
856 struct pnfs_layout_hdr *lo;
857 struct pnfs_layout_segment *lseg = NULL;
858 bool first = false;
859
860 if (!pnfs_enabled_sb(NFS_SERVER(ino)))
861 return NULL;
862 spin_lock(&ino->i_lock);
863 lo = pnfs_find_alloc_layout(ino, gfp_flags);
864 if (lo == NULL) {
865 dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
866 goto out_unlock;
867 }
868
869 /* Do we even need to bother with this? */
870 if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
871 test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
872 dprintk("%s matches recall, use MDS\n", __func__);
873 goto out_unlock;
874 }
875
876 /* if LAYOUTGET already failed once we don't try again */
877 if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
878 goto out_unlock;
879
880 /* Check to see if the layout for the given range already exists */
881 lseg = pnfs_find_lseg(lo, &arg);
882 if (lseg)
883 goto out_unlock;
884
885 if (pnfs_layoutgets_blocked(lo, NULL, 0))
886 goto out_unlock;
887 atomic_inc(&lo->plh_outstanding);
888
889 get_layout_hdr(lo);
890 if (list_empty(&lo->plh_segs))
891 first = true;
892 spin_unlock(&ino->i_lock);
893 if (first) {
894 /* The lo must be on the clp list if there is any
895 * chance of a CB_LAYOUTRECALL(FILE) coming in.
896 */
897 spin_lock(&clp->cl_lock);
898 BUG_ON(!list_empty(&lo->plh_layouts));
899 list_add_tail(&lo->plh_layouts, &clp->cl_layouts);
900 spin_unlock(&clp->cl_lock);
901 }
902
903 pg_offset = arg.offset & ~PAGE_CACHE_MASK;
904 if (pg_offset) {
905 arg.offset -= pg_offset;
906 arg.length += pg_offset;
907 }
908 arg.length = PAGE_CACHE_ALIGN(arg.length);
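/*
 * Worked example (illustrative, assuming 4 KiB pages): a request for 100
 * bytes at offset 5000 has pg_offset = 904, so it becomes offset 4096,
 * length 100 + 904 = 1004, which PAGE_CACHE_ALIGN then rounds up to 4096.
 * The LAYOUTGET range therefore covers whole pages.
 */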
909
910 lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
911 if (!lseg && first) {
912 spin_lock(&clp->cl_lock);
913 list_del_init(&lo->plh_layouts);
914 spin_unlock(&clp->cl_lock);
915 }
916 atomic_dec(&lo->plh_outstanding);
917 put_layout_hdr(lo);
918 out:
919 dprintk("%s end, state 0x%lx lseg %p\n", __func__,
920 nfsi->layout ? nfsi->layout->plh_flags : -1, lseg);
921 return lseg;
922 out_unlock:
923 spin_unlock(&ino->i_lock);
924 goto out;
925 }
926
927 int
928 pnfs_layout_process(struct nfs4_layoutget *lgp)
929 {
930 struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
931 struct nfs4_layoutget_res *res = &lgp->res;
932 struct pnfs_layout_segment *lseg;
933 struct inode *ino = lo->plh_inode;
934 struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
935 int status = 0;
936
937 /* Inject layout blob into I/O device driver */
938 lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
939 if (!lseg || IS_ERR(lseg)) {
940 if (!lseg)
941 status = -ENOMEM;
942 else
943 status = PTR_ERR(lseg);
944 dprintk("%s: Could not allocate layout: error %d\n",
945 __func__, status);
946 goto out;
947 }
948
949 spin_lock(&ino->i_lock);
950 if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
951 test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
952 dprintk("%s forget reply due to recall\n", __func__);
953 goto out_forget_reply;
954 }
955
956 if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
957 dprintk("%s forget reply due to state\n", __func__);
958 goto out_forget_reply;
959 }
960 init_lseg(lo, lseg);
961 lseg->pls_range = res->range;
962 *lgp->lsegpp = get_lseg(lseg);
963 pnfs_insert_layout(lo, lseg);
964
965 if (res->return_on_close) {
966 set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
967 set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
968 }
969
970 /* Done processing layoutget. Set the layout stateid */
971 pnfs_set_layout_stateid(lo, &res->stateid, false);
972 spin_unlock(&ino->i_lock);
973 out:
974 return status;
975
976 out_forget_reply:
977 spin_unlock(&ino->i_lock);
978 lseg->pls_layout = lo;
979 NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
980 goto out;
981 }
982
983 static int pnfs_read_pg_test(struct nfs_pageio_descriptor *pgio,
984 struct nfs_page *prev,
985 struct nfs_page *req)
986 {
987 if (pgio->pg_count == prev->wb_bytes) {
988 /* This is the first coalesce call for a series of nfs_pages */
989 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
990 prev->wb_context,
991 req_offset(req),
992 pgio->pg_count,
993 IOMODE_READ,
994 GFP_KERNEL);
995 } else if (pgio->pg_lseg &&
996 req_offset(req) > end_offset(pgio->pg_lseg->pls_range.offset,
997 pgio->pg_lseg->pls_range.length))
998 return 0;
999 return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
1000 }
1001
1002 void
1003 pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
1004 {
1005 struct pnfs_layoutdriver_type *ld;
1006
1007 ld = NFS_SERVER(inode)->pnfs_curr_ld;
1008 pgio->pg_test = (ld && ld->pg_test) ? pnfs_read_pg_test : NULL;
1009 }
1010
1011 static int pnfs_write_pg_test(struct nfs_pageio_descriptor *pgio,
1012 struct nfs_page *prev,
1013 struct nfs_page *req)
1014 {
1015 if (pgio->pg_count == prev->wb_bytes) {
1016 /* This is the first coalesce call for a series of nfs_pages */
1017 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1018 prev->wb_context,
1019 req_offset(req),
1020 pgio->pg_count,
1021 IOMODE_RW,
1022 GFP_NOFS);
1023 } else if (pgio->pg_lseg &&
1024 req_offset(req) > end_offset(pgio->pg_lseg->pls_range.offset,
1025 pgio->pg_lseg->pls_range.length))
1026 return 0;
1027 return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
1028 }
1029
1030 void
1031 pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode)
1032 {
1033 struct pnfs_layoutdriver_type *ld;
1034
1035 ld = NFS_SERVER(inode)->pnfs_curr_ld;
1036 pgio->pg_test = (ld && ld->pg_test) ? pnfs_write_pg_test : NULL;
1037 }
1038
1039 enum pnfs_try_status
1040 pnfs_try_to_write_data(struct nfs_write_data *wdata,
1041 const struct rpc_call_ops *call_ops, int how)
1042 {
1043 struct inode *inode = wdata->inode;
1044 enum pnfs_try_status trypnfs;
1045 struct nfs_server *nfss = NFS_SERVER(inode);
1046
1047 wdata->mds_ops = call_ops;
1048
1049 dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
1050 inode->i_ino, wdata->args.count, wdata->args.offset, how);
1051
1052 trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
1053 if (trypnfs == PNFS_NOT_ATTEMPTED) {
1054 put_lseg(wdata->lseg);
1055 wdata->lseg = NULL;
1056 } else
1057 nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
1058
1059 dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
1060 return trypnfs;
1061 }
1062
1063 /*
1064 * Call the appropriate parallel I/O subsystem read function.
1065 */
1066 enum pnfs_try_status
1067 pnfs_try_to_read_data(struct nfs_read_data *rdata,
1068 const struct rpc_call_ops *call_ops)
1069 {
1070 struct inode *inode = rdata->inode;
1071 struct nfs_server *nfss = NFS_SERVER(inode);
1072 enum pnfs_try_status trypnfs;
1073
1074 rdata->mds_ops = call_ops;
1075
1076 dprintk("%s: Reading ino:%lu %u@%llu\n",
1077 __func__, inode->i_ino, rdata->args.count, rdata->args.offset);
1078
1079 trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
1080 if (trypnfs == PNFS_NOT_ATTEMPTED) {
1081 put_lseg(rdata->lseg);
1082 rdata->lseg = NULL;
1083 } else {
1084 nfs_inc_stats(inode, NFSIOS_PNFS_READ);
1085 }
1086 dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
1087 return trypnfs;
1088 }
1089
1090 /*
1091 * Currently there is only one (whole file) write lseg.
1092 */
1093 static struct pnfs_layout_segment *pnfs_list_write_lseg(struct inode *inode)
1094 {
1095 struct pnfs_layout_segment *lseg, *rv = NULL;
1096
1097 list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list)
1098 if (lseg->pls_range.iomode == IOMODE_RW)
1099 rv = lseg;
1100 return rv;
1101 }
1102
1103 void
1104 pnfs_set_layoutcommit(struct nfs_write_data *wdata)
1105 {
1106 struct nfs_inode *nfsi = NFS_I(wdata->inode);
1107 loff_t end_pos = wdata->args.offset + wdata->res.count;
1108 bool mark_as_dirty = false;
1109
1110 spin_lock(&nfsi->vfs_inode.i_lock);
1111 if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
1112 /* references matched in nfs4_layoutcommit_release */
1113 get_lseg(wdata->lseg);
1114 wdata->lseg->pls_lc_cred =
1115 get_rpccred(wdata->args.context->state->owner->so_cred);
1116 mark_as_dirty = true;
1117 dprintk("%s: Set layoutcommit for inode %lu ",
1118 __func__, wdata->inode->i_ino);
1119 }
1120 if (end_pos > wdata->lseg->pls_end_pos)
1121 wdata->lseg->pls_end_pos = end_pos;
1122 spin_unlock(&nfsi->vfs_inode.i_lock);
1123
1124 /* if pnfs_layoutcommit_inode() runs between inode locks, the next one
1125 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
1126 if (mark_as_dirty)
1127 mark_inode_dirty_sync(wdata->inode);
1128 }
1129 EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
1130
1131 /*
1132 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
1133 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
1134 * data to disk to allow the server to recover the data if it crashes.
1135 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
1136 * is off, and a COMMIT is sent to a data server, or
1137 * if WRITEs to a data server return NFS_DATA_SYNC.
1138 */
1139 int
1140 pnfs_layoutcommit_inode(struct inode *inode, bool sync)
1141 {
1142 struct nfs4_layoutcommit_data *data;
1143 struct nfs_inode *nfsi = NFS_I(inode);
1144 struct pnfs_layout_segment *lseg;
1145 struct rpc_cred *cred;
1146 loff_t end_pos;
1147 int status = 0;
1148
1149 dprintk("--> %s inode %lu\n", __func__, inode->i_ino);
1150
1151 if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
1152 return 0;
1153
1154 /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
1155 data = kzalloc(sizeof(*data), GFP_NOFS);
1156 if (!data) {
1157 mark_inode_dirty_sync(inode);
1158 status = -ENOMEM;
1159 goto out;
1160 }
1161
1162 spin_lock(&inode->i_lock);
1163 if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
1164 spin_unlock(&inode->i_lock);
1165 kfree(data);
1166 goto out;
1167 }
1168 /*
1169 * Currently there is only one (whole file) write lseg, which is referenced
1170 * in pnfs_set_layoutcommit and so will be found here.
1171 */
1172 lseg = pnfs_list_write_lseg(inode);
1173
1174 end_pos = lseg->pls_end_pos;
1175 cred = lseg->pls_lc_cred;
1176 lseg->pls_end_pos = 0;
1177 lseg->pls_lc_cred = NULL;
1178
1179 memcpy(&data->args.stateid.data, nfsi->layout->plh_stateid.data,
1180 sizeof(nfsi->layout->plh_stateid.data));
1181 spin_unlock(&inode->i_lock);
1182
1183 data->args.inode = inode;
1184 data->lseg = lseg;
1185 data->cred = cred;
1186 nfs_fattr_init(&data->fattr);
1187 data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
1188 data->res.fattr = &data->fattr;
1189 data->args.lastbytewritten = end_pos - 1;
1190 data->res.server = NFS_SERVER(inode);
1191
1192 status = nfs4_proc_layoutcommit(data, sync);
1193 out:
1194 dprintk("<-- %s status %d\n", __func__, status);
1195 return status;
1196 }