/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/*
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

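/* Look up a layout driver by id, taking pnfs_spinlock around the table search */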
static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	spin_unlock(&pnfs_spinlock);
	return local;
}

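/* Drop the module reference taken by set_pnfs_layoutdriver() and detach
 * the current layout driver from the server.
 */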
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld)
		module_put(nfss->pnfs_curr_ld->owner);
	nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
	      (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "%s: id %u cl_exchange_flags 0x%x\n", __func__,
		       id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	if (!try_module_get(ld_type->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		goto out_no_driver;
	}
	server->pnfs_curr_ld = ld_type;

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}

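/* Called by a layout driver module to add itself to pnfs_modules_tbl */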
int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "%s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "%s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "%s Module with id %d already loaded!\n",
		       __func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

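/* Tear down a layout header once its last reference has been dropped */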
static void
destroy_layout_hdr(struct pnfs_layout_hdr *lo)
{
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	BUG_ON(!list_empty(&lo->plh_layouts));
	NFS_I(lo->plh_inode)->layout = NULL;
	kfree(lo);
}

static void
put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
	if (atomic_dec_and_test(&lo->plh_refcount))
		destroy_layout_hdr(lo);
}

void
put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		destroy_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
	}
}

static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	atomic_set(&lseg->pls_refcount, 1);
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}

static void free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	/* Matched by get_layout_hdr in pnfs_insert_layout */
	put_layout_hdr(NFS_I(ino)->layout);
}

static void
put_lseg_common(struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lseg->pls_layout->plh_inode;

	BUG_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	if (list_empty(&lseg->pls_layout->plh_segs)) {
		set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
		/* Matched by initial refcount set in alloc_init_layout_hdr */
		put_layout_hdr_locked(lseg->pls_layout);
	}
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

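/* Drop a reference on a layout segment; the final put frees the segment
 * outside the i_lock via pnfs_free_lseg_list().
 */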
void
put_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	inode = lseg->pls_layout->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		LIST_HEAD(free_me);

		put_lseg_common(lseg);
		list_add(&lseg->pls_list, &free_me);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&free_me);
	}
}
EXPORT_SYMBOL_GPL(put_lseg);

static u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}

/* last octet in a range */
static u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end > start ? end - 1 : NFS4_MAX_UINT64;
}

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static inline int
lo_seg_contained(struct pnfs_layout_range *l1,
		 struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}

/*
 * is l1 and l2 intersecting?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static inline int
lo_seg_intersecting(struct pnfs_layout_range *l1,
		    struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
}

static bool
should_free_lseg(u32 lseg_iomode, u32 recall_iomode)
{
	return (recall_iomode == IOMODE_ANY ||
		lseg_iomode == recall_iomode);
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (atomic_dec_and_test(&lseg->pls_refcount)) {
			put_lseg_common(lseg);
			list_add(&lseg->pls_list, tmp_list);
			rv = 1;
		}
	}
	return rv;
}

/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
int
mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    u32 iomode)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs)) {
		if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
			put_layout_hdr_locked(lo);
		return 0;
	}
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (should_free_lseg(lseg->pls_range.iomode, iomode)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;
	struct pnfs_layout_hdr *lo;

	if (list_empty(free_me))
		return;

	lo = list_first_entry(free_me, struct pnfs_layout_segment,
			      pls_list)->pls_layout;

	if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
		struct nfs_client *clp;

		clp = NFS_SERVER(lo->plh_inode)->nfs_client;
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		free_lseg(lseg);
	}
}

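/* Invalidate and free every layout segment cached for this inode */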
void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		mark_matching_lsegs_invalid(lo, &tmp_list, IOMODE_ANY);
	}
	spin_unlock(&nfsi->vfs_inode.i_lock);
	pnfs_free_lseg_list(&tmp_list);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&clp->cl_lock);
	list_splice_init(&clp->cl_layouts, &tmp_list);
	spin_unlock(&clp->cl_lock);

	while (!list_empty(&tmp_list)) {
		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
				plh_layouts);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		list_del_init(&lo->plh_layouts);
		pnfs_destroy_layout(NFS_I(lo->plh_inode));
	}
}

/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq;

	oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
	newseq = be32_to_cpu(new->stateid.seqid);
	if ((int)(newseq - oldseq) > 0) {
		memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
		if (update_barrier) {
			u32 new_barrier = be32_to_cpu(new->stateid.seqid);

			if ((int)(new_barrier - lo->plh_barrier))
				lo->plh_barrier = new_barrier;
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.  It needs to be
			 * within 2**31 to count as "behind", so if it
			 * gets too near that limit, give us a little leeway
			 * and bring it to within 2**30.
			 * NOTE - and yes, this is all unsigned arithmetic.
			 */
			if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
				lo->plh_barrier = newseq - (1 << 30);
		}
	}
}

/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
			int lget)
{
	if ((stateid) &&
	    (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
		return true;
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget));
}

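/* Choose the stateid to send in LAYOUTGET: the open stateid while no
 * segments are held, otherwise the current layout stateid.
 */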
int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
		status = -EAGAIN;
	} else if (list_empty(&lo->plh_segs)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			memcpy(dst->data, open_state->stateid.data,
			       sizeof(open_state->stateid.data));
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data));
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg = NULL;
	struct page **pages = NULL;
	int i;
	u32 max_resp_sz, max_pages;

	dprintk("--> %s\n", __func__);

	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	/* allocate pages for xdr post processing */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = max_resp_sz >> PAGE_SHIFT;

	pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
	if (!pages)
		goto out_err_free;

	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i])
			goto out_err_free;
	}

	lgp->args.minlength = PAGE_CACHE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range = *range;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->args.layout.pages = pages;
	lgp->args.layout.pglen = max_pages * PAGE_SIZE;
	lgp->lsegpp = &lseg;
	lgp->gfp_flags = gfp_flags;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	nfs4_proc_layoutget(lgp);
	if (!lseg) {
		/* remember that LAYOUTGET failed and suspend trying */
		set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
	}

	/* free xdr pages */
	for (i = 0; i < max_pages; i++)
		__free_page(pages[i]);
	kfree(pages);

	return lseg;

out_err_free:
	/* free any allocated xdr pages, lgp as it's not used */
	if (pages) {
		for (i = 0; i < max_pages; i++) {
			if (!pages[i])
				break;
			__free_page(pages[i]);
		}
		kfree(pages);
	}
	kfree(lgp);
	return NULL;
}

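/* Return-on-close: invalidate this inode's ROC layout segments before the
 * CLOSE is sent; returns true if any were found.
 */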
bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	LIST_HEAD(tmp_list);
	bool found = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_nolayout;
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_nolayout;
	lo->plh_block_lgets++;
	get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_nolayout:
	spin_unlock(&ino->i_lock);
	return false;
}

void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	put_layout_hdr_locked(lo);
	spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if ((int)(barrier - lo->plh_barrier) > 0)
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}

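/* Report whether any ROC segments are still in use; if none remain, hand
 * the caller a worst-case seqid barrier.
 */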
bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_segment *lseg;
	bool found = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			found = true;
			break;
		}
	if (!found) {
		struct pnfs_layout_hdr *lo = nfsi->layout;
		u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid);

		/* Since close does not return a layout stateid for use as
		 * a barrier, we choose the worst-case barrier.
		 */
		*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
	}
	spin_unlock(&ino->i_lock);
	return found;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
cmp_layout(struct pnfs_layout_range *l1,
	   struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}

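/* Insert a layout segment into the per-layout list, kept sorted by cmp_layout() */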
static void
pnfs_insert_layout(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_recall);
	lo->plh_inode = ino;
	return lo;
}

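/* Return the inode's layout header, allocating one (i_lock is dropped and
 * re-taken around the allocation) if none exists yet.
 */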
static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	assert_spin_locked(&ino->i_lock);
	if (nfsi->layout) {
		if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
			return NULL;
		else
			return nfsi->layout;
	}
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL))	/* Won the race? */
		nfsi->layout = new;
	else
		kfree(new);
	return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static int
is_matching_lseg(struct pnfs_layout_range *ls_range,
		 struct pnfs_layout_range *range)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    !lo_seg_intersecting(ls_range, range))
		return 0;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return lo_seg_contained(ls_range, &range1);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    is_matching_lseg(&lseg->pls_range, range)) {
			ret = get_lseg(lseg);
			break;
		}
		if (cmp_layout(range, &lseg->pls_range) > 0)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first = false;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		return NULL;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, gfp_flags);
	if (lo == NULL) {
		dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
		goto out_unlock;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
		goto out_unlock;

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, &arg);
	if (lseg)
		goto out_unlock;

	if (pnfs_layoutgets_blocked(lo, NULL, 0))
		goto out_unlock;
	atomic_inc(&lo->plh_outstanding);

	get_layout_hdr(lo);
	if (list_empty(&lo->plh_segs))
		first = true;
	spin_unlock(&ino->i_lock);
	if (first) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		BUG_ON(!list_empty(&lo->plh_layouts));
		list_add_tail(&lo->plh_layouts, &clp->cl_layouts);
		spin_unlock(&clp->cl_lock);
	}

	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	arg.length = PAGE_CACHE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
	if (!lseg && first) {
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	atomic_dec(&lo->plh_outstanding);
	put_layout_hdr(lo);
out:
	dprintk("%s end, state 0x%lx lseg %p\n", __func__,
		nfsi->layout ? nfsi->layout->plh_flags : -1, lseg);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out;
}

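/* Process a LAYOUTGET reply: hand the layout blob to the layout driver and
 * insert the resulting segment into the layout cache.
 */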
int
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
	int status = 0;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
			__func__, status);
		goto out;
	}

	spin_lock(&ino->i_lock);
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}
	init_lseg(lo, lseg);
	lseg->pls_range = res->range;
	*lgp->lsegpp = get_lseg(lseg);
	pnfs_insert_layout(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid, false);
	spin_unlock(&ino->i_lock);
out:
	return status;

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}

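/* pg_test hook for reads: grab a layout segment on the first request of a
 * coalesce series, then defer to the layout driver's own pg_test.
 */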
static int pnfs_read_pg_test(struct nfs_pageio_descriptor *pgio,
			     struct nfs_page *prev,
			     struct nfs_page *req)
{
	if (pgio->pg_count == prev->wb_bytes) {
		/* This is first coalesce call for a series of nfs_pages */
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   prev->wb_context,
						   req_offset(req),
						   pgio->pg_count,
						   IOMODE_READ,
						   GFP_KERNEL);
	} else if (pgio->pg_lseg &&
		   req_offset(req) > end_offset(pgio->pg_lseg->pls_range.offset,
						pgio->pg_lseg->pls_range.length))
		return 0;

	return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
}

void
pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
{
	struct pnfs_layoutdriver_type *ld;

	ld = NFS_SERVER(inode)->pnfs_curr_ld;
	pgio->pg_test = (ld && ld->pg_test) ? pnfs_read_pg_test : NULL;
}

static int pnfs_write_pg_test(struct nfs_pageio_descriptor *pgio,
			      struct nfs_page *prev,
			      struct nfs_page *req)
{
	if (pgio->pg_count == prev->wb_bytes) {
		/* This is first coalesce call for a series of nfs_pages */
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   prev->wb_context,
						   req_offset(req),
						   pgio->pg_count,
						   IOMODE_RW,
						   GFP_NOFS);
	} else if (pgio->pg_lseg &&
		   req_offset(req) > end_offset(pgio->pg_lseg->pls_range.offset,
						pgio->pg_lseg->pls_range.length))
		return 0;

	return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
}

void
pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode)
{
	struct pnfs_layoutdriver_type *ld;

	ld = NFS_SERVER(inode)->pnfs_curr_ld;
	pgio->pg_test = (ld && ld->pg_test) ? pnfs_write_pg_test : NULL;
}

enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_write_data *wdata,
			const struct rpc_call_ops *call_ops, int how)
{
	struct inode *inode = wdata->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	wdata->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, wdata->args.count, wdata->args.offset, how);

	trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
	if (trypnfs == PNFS_NOT_ATTEMPTED) {
		put_lseg(wdata->lseg);
		wdata->lseg = NULL;
	} else
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);

	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_read_data *rdata,
		       const struct rpc_call_ops *call_ops)
{
	struct inode *inode = rdata->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	rdata->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, rdata->args.count, rdata->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
	if (trypnfs == PNFS_NOT_ATTEMPTED) {
		put_lseg(rdata->lseg);
		rdata->lseg = NULL;
	} else {
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	}
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

/*
 * Currently there is only one (whole file) write lseg.
 */
static struct pnfs_layout_segment *pnfs_list_write_lseg(struct inode *inode)
{
	struct pnfs_layout_segment *lseg, *rv = NULL;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list)
		if (lseg->pls_range.iomode == IOMODE_RW)
			rv = lseg;
	return rv;
}

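/* Mark the inode as needing a LAYOUTCOMMIT and track the highest byte
 * written through the layout.
 */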
void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
	struct nfs_inode *nfsi = NFS_I(wdata->inode);
	loff_t end_pos = wdata->args.offset + wdata->res.count;
	bool mark_as_dirty = false;

	spin_lock(&nfsi->vfs_inode.i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		/* references matched in nfs4_layoutcommit_release */
		get_lseg(wdata->lseg);
		wdata->lseg->pls_lc_cred =
			get_rpccred(wdata->args.context->state->owner->so_cred);
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, wdata->inode->i_ino);
	}
	if (end_pos > wdata->lseg->pls_end_pos)
		wdata->lseg->pls_end_pos = end_pos;
	spin_unlock(&nfsi->vfs_inode.i_lock);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(wdata->inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg;
	struct rpc_cred *cred;
	loff_t end_pos;
	int status = 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return 0;

	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data) {
		mark_inode_dirty_sync(inode);
		status = -ENOMEM;
		goto out;
	}

	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		spin_unlock(&inode->i_lock);
		kfree(data);
		goto out;
	}
	/*
	 * Currently only one (whole file) write lseg which is referenced
	 * in pnfs_set_layoutcommit and will be found.
	 */
	lseg = pnfs_list_write_lseg(inode);

	end_pos = lseg->pls_end_pos;
	cred = lseg->pls_lc_cred;
	lseg->pls_end_pos = 0;
	lseg->pls_lc_cred = NULL;

	memcpy(&data->args.stateid.data, nfsi->layout->plh_stateid.data,
		sizeof(nfsi->layout->plh_stateid.data));
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->lseg = lseg;
	data->cred = cred;
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	status = nfs4_proc_layoutcommit(data, sync);
out:
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
}