/*
 * Module for pnfs flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include <linux/sunrpc/metrics.h>

#include "flexfilelayout.h"
#include "../nfs4session.h"
#include "../nfs4idmap.h"
#include "../internal.h"
#include "../delegation.h"
#include "../nfs4trace.h"
#include "../iostat.h"

#define NFSDBG_FACILITY         NFSDBG_PNFS_LD

#define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_flexfile_layout *ffl;

	ffl = kzalloc(sizeof(*ffl), gfp_flags);
	if (ffl) {
		INIT_LIST_HEAD(&ffl->error_list);
		return &ffl->generic_hdr;
	} else
		return NULL;
}
static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs4_ff_layout_ds_err *err, *n;

	list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
				 list) {
		list_del(&err->list);
		kfree(err);
	}
	kfree(FF_LAYOUT_FROM_HDR(lo));
}
static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
	if (unlikely(p == NULL))
		return -ENOBUFS;
	memcpy(stateid, p, NFS4_STATEID_SIZE);
	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
	return 0;
}
static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
	nfs4_print_deviceid(devid);
	return 0;
}
static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	fh->size = be32_to_cpup(p++);
	if (fh->size > sizeof(struct nfs_fh)) {
		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
		       fh->size);
		return -EOVERFLOW;
	}
	/* fh.data */
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(&fh->data, p, fh->size);
	dprintk("%s: fh len %d\n", __func__, fh->size);

	return 0;
}
/*
 * Currently only stringified uids and gids are accepted.
 * I.e., kerberos is not supported to the DSes, so no principals.
 *
 * That means that one common function will suffice, but when
 * principals are added, this should be split to accommodate
 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 */
static int
decode_name(struct xdr_stream *xdr, u32 *id)
{
	__be32 *p;
	int len;

	/* opaque_length(4)*/
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	len = be32_to_cpup(p++);
	if (len < 0)
		return -EINVAL;

	dprintk("%s: len %u\n", __func__, len);

	/* opaque body */
	p = xdr_inline_decode(xdr, len);
	if (unlikely(!p))
		return -ENOBUFS;

	if (!nfs_map_string_to_numeric((char *)p, len, id))
		return -EINVAL;

	return 0;
}
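
/*
 * Illustrative note (added, not from the original source): on the wire
 * the decoder above expects a standard XDR opaque, i.e. a 4-byte
 * big-endian length followed by the principal bytes (padded to a 4-byte
 * boundary).  A stringified uid such as "1000" arrives as length 4
 * followed by the ASCII bytes '1' '0' '0' '0', which
 * nfs_map_string_to_numeric() then parses back into the numeric id.
 */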
static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
{
	struct nfs4_ff_layout_mirror *mirror;

	mirror = kzalloc(sizeof(*mirror), gfp_flags);
	if (mirror != NULL) {
		spin_lock_init(&mirror->lock);
		atomic_set(&mirror->ref, 1);
	}
	return mirror;
}
static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	kfree(mirror->fh_versions);
	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
	kfree(mirror);
}
static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	if (mirror != NULL && atomic_dec_and_test(&mirror->ref))
		ff_layout_free_mirror(mirror);
}
static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
{
	int i;

	if (fls->mirror_array) {
		for (i = 0; i < fls->mirror_array_cnt; i++) {
			/* normally mirror_ds is freed in
			 * .free_deviceid_node but we still do it here
			 * for .alloc_lseg error path */
			ff_layout_put_mirror(fls->mirror_array[i]);
		}
		kfree(fls->mirror_array);
		fls->mirror_array = NULL;
	}
}
static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
{
	int ret = 0;

	dprintk("--> %s\n", __func__);

	/* FIXME: remove this check when layout segment support is added */
	if (lgr->range.offset != 0 ||
	    lgr->range.length != NFS4_MAX_UINT64) {
		dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
			__func__);
		ret = -EINVAL;
	}

	dprintk("--> %s returns %d\n", __func__, ret);
	return ret;
}
static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
{
	if (fls) {
		ff_layout_free_mirror_array(fls);
		kfree(fls);
	}
}
static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
	int i, j;

	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
		for (j = i + 1; j < fls->mirror_array_cnt; j++)
			if (fls->mirror_array[i]->efficiency <
			    fls->mirror_array[j]->efficiency)
				swap(fls->mirror_array[i],
				     fls->mirror_array[j]);
	}
}
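
/*
 * Illustrative example (added note): the nested loops above are a
 * simple O(n^2) selection-style sort ordering mirrors by descending
 * efficiency, e.g. efficiencies {2, 5, 3} become {5, 3, 2}, so that
 * ff_layout_choose_best_ds_for_read() below can simply scan from
 * index 0.  n is the mirror count, so quadratic cost is fine here.
 */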
static struct pnfs_layout_segment *
ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
		     struct nfs4_layoutget_res *lgr,
		     gfp_t gfp_flags)
{
	struct pnfs_layout_segment *ret;
	struct nfs4_ff_layout_segment *fls = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	u64 stripe_unit;
	u32 mirror_array_cnt;
	__be32 *p;
	int i, rc;

	dprintk("--> %s\n", __func__);
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		return ERR_PTR(-ENOMEM);

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
			      lgr->layoutp->len);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* stripe unit and mirror_array_cnt */
	rc = -EIO;
	p = xdr_inline_decode(&stream, 8 + 4);
	if (!p)
		goto out_err_free;

	p = xdr_decode_hyper(p, &stripe_unit);
	mirror_array_cnt = be32_to_cpup(p++);
	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
		stripe_unit, mirror_array_cnt);

	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
	    mirror_array_cnt == 0)
		goto out_err_free;

	rc = -ENOMEM;
	fls = kzalloc(sizeof(*fls), gfp_flags);
	if (!fls)
		goto out_err_free;

	fls->mirror_array_cnt = mirror_array_cnt;
	fls->stripe_unit = stripe_unit;
	fls->mirror_array = kcalloc(fls->mirror_array_cnt,
				    sizeof(fls->mirror_array[0]), gfp_flags);
	if (fls->mirror_array == NULL)
		goto out_err_free;

	for (i = 0; i < fls->mirror_array_cnt; i++) {
		struct nfs4_deviceid devid;
		struct nfs4_deviceid_node *idnode;
		u32 ds_count;
		u32 fh_count;
		int j;

		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		ds_count = be32_to_cpup(p);

		/* FIXME: allow for striping? */
		if (ds_count != 1)
			goto out_err_free;

		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
		if (fls->mirror_array[i] == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		fls->mirror_array[i]->ds_count = ds_count;

		/* deviceid */
		rc = decode_deviceid(&stream, &devid);
		if (rc)
			goto out_err_free;

		idnode = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
						&devid, lh->plh_lc_cred,
						gfp_flags);
		/*
		 * upon success, mirror_ds is allocated by previous
		 * getdeviceinfo, or newly by .alloc_deviceid_node
		 * nfs4_find_get_deviceid failure is indeed getdeviceinfo failure
		 */
		if (idnode == NULL)
			goto out_err_free;
		fls->mirror_array[i]->mirror_ds =
			FF_LAYOUT_MIRROR_DS(idnode);

		/* efficiency */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fls->mirror_array[i]->efficiency = be32_to_cpup(p);

		/* stateid */
		rc = decode_stateid(&stream, &fls->mirror_array[i]->stateid);
		if (rc)
			goto out_err_free;

		/* fh */
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fh_count = be32_to_cpup(p);

		fls->mirror_array[i]->fh_versions =
			kzalloc(fh_count * sizeof(struct nfs_fh),
				gfp_flags);
		if (fls->mirror_array[i]->fh_versions == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		for (j = 0; j < fh_count; j++) {
			rc = decode_nfs_fh(&stream,
					   &fls->mirror_array[i]->fh_versions[j]);
			if (rc)
				goto out_err_free;
		}

		fls->mirror_array[i]->fh_versions_cnt = fh_count;

		/* user */
		rc = decode_name(&stream, &fls->mirror_array[i]->uid);
		if (rc)
			goto out_err_free;

		/* group */
		rc = decode_name(&stream, &fls->mirror_array[i]->gid);
		if (rc)
			goto out_err_free;

		dprintk("%s: uid %d gid %d\n", __func__,
			fls->mirror_array[i]->uid,
			fls->mirror_array[i]->gid);
	}

	p = xdr_inline_decode(&stream, 4);
	if (p)
		fls->flags = be32_to_cpup(p);

	ff_layout_sort_mirrors(fls);
	rc = ff_layout_check_layout(lgr);
	if (rc)
		goto out_err_free;

	ret = &fls->generic_hdr;
	dprintk("<-- %s (success)\n", __func__);
out_free_page:
	__free_page(scratch);
	return ret;
out_err_free:
	_ff_layout_free_lseg(fls);
	ret = ERR_PTR(rc);
	dprintk("<-- %s (%d)\n", __func__, rc);
	goto out_free_page;
}
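
/*
 * Summary (added note): the layout body decoded above has this shape on
 * the wire, following the decode calls rather than quoting the spec:
 *
 *	stripe_unit		(8 bytes, hyper)
 *	mirror_array_cnt	(4 bytes)
 *	for each mirror:
 *		ds_count	(4 bytes, must be 1 for now)
 *		deviceid	(NFS4_DEVICEID4_SIZE bytes)
 *		efficiency	(4 bytes)
 *		stateid		(NFS4_STATEID_SIZE bytes)
 *		fh_count	(4 bytes), then fh_count file handles
 *		uid, gid	(XDR opaque strings)
 *	flags			(4 bytes, optional trailer)
 */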
static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &layout->plh_segs, pls_list)
		if (lseg->pls_range.iomode == IOMODE_RW)
			return true;

	return false;
}
static void
ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	int i;

	dprintk("--> %s\n", __func__);

	for (i = 0; i < fls->mirror_array_cnt; i++) {
		if (fls->mirror_array[i]) {
			nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds);
			fls->mirror_array[i]->mirror_ds = NULL;
			if (fls->mirror_array[i]->cred) {
				put_rpccred(fls->mirror_array[i]->cred);
				fls->mirror_array[i]->cred = NULL;
			}
		}
	}

	if (lseg->pls_range.iomode == IOMODE_RW) {
		struct nfs4_flexfile_layout *ffl;
		struct inode *inode;

		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
		inode = ffl->generic_hdr.plh_inode;
		spin_lock(&inode->i_lock);
		if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
			ffl->commit_info.nbuckets = 0;
			kfree(ffl->commit_info.buckets);
			ffl->commit_info.buckets = NULL;
		}
		spin_unlock(&inode->i_lock);
	}
	_ff_layout_free_lseg(fls);
}
/* Return 1 until we have multiple lsegs support */
static int
ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
{
	return 1;
}
static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	/* first IO request? */
	if (atomic_inc_return(&timer->n_ops) == 1) {
		timer->start_time = now;
	}
}
static ktime_t
nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	ktime_t start;

	if (atomic_dec_return(&timer->n_ops) < 0)
		WARN_ON_ONCE(1);

	start = timer->start_time;
	timer->start_time = now;
	return ktime_sub(now, start);
}
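
/*
 * Worked example (added note): start_time is set when n_ops goes 0->1
 * and advanced to "now" on every completion, so each completed op
 * contributes the interval since the previous busy-timer event (first
 * start or last completion).  Summing those intervals into
 * total_busy_time therefore counts wall-clock time with at least one op
 * outstanding, without double-counting overlapping ops.
 */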
static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
			    struct nfs4_ff_layoutstat *layoutstat,
			    ktime_t now)
{
	static const ktime_t notime = {0};

	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
	if (ktime_equal(mirror->start_time, notime))
		mirror->start_time = now;
	if (ktime_equal(mirror->last_report_time, notime))
		mirror->last_report_time = now;
	if (ktime_to_ms(ktime_sub(now, mirror->last_report_time)) >=
			FF_LAYOUTSTATS_REPORT_INTERVAL) {
		mirror->last_report_time = now;
		return true;
	}

	return false;
}
static void
nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;

	iostat->ops_requested++;
	iostat->bytes_requested += requested;
}
static void
nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested,
		__u64 completed,
		ktime_t time_completed,
		ktime_t time_started)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
	ktime_t completion_time = ktime_sub(time_completed, time_started);
	ktime_t timer;

	iostat->ops_completed++;
	iostat->bytes_completed += completed;
	iostat->bytes_not_delivered += requested - completed;

	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
	iostat->total_busy_time =
		ktime_add(iostat->total_busy_time, timer);
	iostat->aggregate_completion_time =
		ktime_add(iostat->aggregate_completion_time,
			  completion_time);
}
static void
nfs4_ff_layout_stat_io_start_read(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_KERNEL);
}
static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed)
{
	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
			requested, completed,
			ktime_get(), task->tk_start);
	spin_unlock(&mirror->lock);
}
static void
nfs4_ff_layout_stat_io_start_write(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_NOIO);
}
static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed,
		enum nfs3_stable_how committed)
{
	if (committed == NFS_UNSTABLE)
		requested = completed = 0;

	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
			requested, completed, ktime_get(), task->tk_start);
	spin_unlock(&mirror->lock);
}
static int
ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
			    struct nfs_commit_info *cinfo,
			    gfp_t gfp_flags)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct pnfs_commit_bucket *buckets;
	int size;

	if (cinfo->ds->nbuckets != 0) {
		/* This assumes there is only one RW lseg per file.
		 * To support multiple lseg per file, we need to
		 * change struct pnfs_commit_bucket to allow dynamic
		 * increasing nbuckets.
		 */
		return 0;
	}

	size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);

	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
			  gfp_flags);
	if (!buckets)
		return -ENOMEM;
	else {
		int i;

		spin_lock(cinfo->lock);
		if (cinfo->ds->nbuckets != 0)
			kfree(buckets);
		else {
			cinfo->ds->buckets = buckets;
			cinfo->ds->nbuckets = size;
			for (i = 0; i < size; i++) {
				INIT_LIST_HEAD(&buckets[i].written);
				INIT_LIST_HEAD(&buckets[i].committing);
				/* mark direct verifier as unset */
				buckets[i].direct_verf.committed =
					NFS_INVALID_STABLE_HOW;
			}
		}
		spin_unlock(cinfo->lock);
		return 0;
	}
}
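
/*
 * Sizing note (added): with the current single-lseg restriction,
 * ff_layout_get_lseg_count() is 1, so the array above ends up with
 * exactly FF_LAYOUT_MIRROR_COUNT(lseg) buckets -- e.g. 2 buckets for a
 * 2-way mirrored layout, one commit bucket per mirror.
 */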
static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct nfs_pageio_descriptor *pgio,
				  int *best_idx)
{
	struct nfs4_ff_layout_segment *fls;
	struct nfs4_pnfs_ds *ds;
	int idx;

	fls = FF_LAYOUT_LSEG(pgio->pg_lseg);
	/* mirrors are sorted by efficiency */
	for (idx = 0; idx < fls->mirror_array_cnt; idx++) {
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, idx, false);
		if (ds) {
			*best_idx = idx;
			return ds;
		}
	}

	return NULL;
}
static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs_pgio_mirror *pgm;
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	int ds_idx;

	/* Use full layout for now */
	if (!pgio->pg_lseg)
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_READ,
						   GFP_KERNEL);
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	ds = ff_layout_choose_best_ds_for_read(pgio, &ds_idx);
	if (!ds)
		goto out_mds;
	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);

	pgio->pg_mirror_idx = ds_idx;

	/* read always uses only one mirror - idx 0 for pgio layer */
	pgm = &pgio->pg_mirrors[0];
	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;

	return;
out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_read_mds(pgio);
}
static void
ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs_pgio_mirror *pgm;
	struct nfs_commit_info cinfo;
	struct nfs4_pnfs_ds *ds;
	int i;
	int status;

	if (!pgio->pg_lseg)
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   GFP_NOFS);
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
	status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
	if (status < 0)
		goto out_mds;

	/* Use a direct mapping of ds_idx to pgio mirror_idx */
	if (WARN_ON_ONCE(pgio->pg_mirror_count !=
	    FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
		goto out_mds;

	for (i = 0; i < pgio->pg_mirror_count; i++) {
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
		if (!ds)
			goto out_mds;
		pgm = &pgio->pg_mirrors[i];
		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
	}

	return;

out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_write_mds(pgio);
}
static unsigned int
ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
				    struct nfs_page *req)
{
	if (!pgio->pg_lseg)
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   GFP_NOFS);
	if (pgio->pg_lseg)
		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);

	/* no lseg means that pnfs is not in use, so no mirroring here */
	nfs_pageio_reset_write_mds(pgio);
	return 1;
}
static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
	.pg_init = ff_layout_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
	.pg_init = ff_layout_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};
static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (retry_pnfs) {
		dprintk("%s Reset task %5u for i/o through pNFS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		if (!hdr->dreq) {
			struct nfs_open_context *ctx;

			ctx = nfs_list_entry(hdr->pages.next)->wb_context;
			set_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
			hdr->completion_ops->error_cleanup(&hdr->pages);
		} else {
			nfs_direct_set_resched_writes(hdr->dreq);
			/* fake unstable write to let common nfs resend pages */
			hdr->verf.committed = NFS_UNSTABLE;
			hdr->good_bytes = hdr->args.count;
		}
		return;
	}

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
	}
}
static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
	}
}
static int ff_layout_async_handle_error_v4(struct rpc_task *task,
					    struct nfs4_state *state,
					    struct nfs_client *clp,
					    struct pnfs_layout_segment *lseg,
					    int idx)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct inode *inode = lo->plh_inode;
	struct nfs_server *mds_server = NFS_SERVER(inode);

	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
	struct nfs_client *mds_client = mds_server->nfs_client;
	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;

	if (task->tk_status >= 0)
		return 0;

	switch (task->tk_status) {
	/* MDS state errors */
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
		if (state == NULL)
			break;
		nfs_remove_bad_delegation(state->inode);
	case -NFS4ERR_OPENMODE:
		if (state == NULL)
			break;
		if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
			goto out_bad_stateid;
		goto wait_on_recovery;
	case -NFS4ERR_EXPIRED:
		if (state != NULL) {
			if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
				goto out_bad_stateid;
		}
		nfs4_schedule_lease_recovery(mds_client);
		goto wait_on_recovery;
	/* DS session errors */
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR %d, Reset session. Exchangeid "
			"flags 0x%x\n", __func__, task->tk_status,
			clp->cl_exchange_flags);
		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
		break;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
		break;
	case -NFS4ERR_RETRY_UNCACHED_REP:
		break;
	/* Invalidate Layout errors */
	case -NFS4ERR_PNFS_NO_LAYOUT:
	case -ESTALE:           /* mapped NFS4ERR_STALE */
	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
	case -NFS4ERR_FHEXPIRED:
	case -NFS4ERR_WRONG_TYPE:
		dprintk("%s Invalid layout error %d\n", __func__,
			task->tk_status);
		/*
		 * Destroy layout so new i/o will get a new layout.
		 * Layout will not be destroyed until all current lseg
		 * references are put. Mark layout as invalid to resend failed
		 * i/o and all i/o waiting on the slot table to the MDS until
		 * layout is destroyed and a new valid layout is obtained.
		 */
		pnfs_destroy_layout(NFS_I(inode));
		rpc_wake_up(&tbl->slot_tbl_waitq);
		goto reset;
	/* RPC connection errors */
	default:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_mark_deviceid_unavailable(devid);
		rpc_wake_up(&tbl->slot_tbl_waitq);
		/* fall through */
reset:
		if (ff_layout_has_available_ds(lseg))
			return -NFS4ERR_RESET_TO_PNFS;
		dprintk("%s Retry through MDS. Error %d\n", __func__,
			task->tk_status);
		return -NFS4ERR_RESET_TO_MDS;
	}
out:
	task->tk_status = 0;
	return -EAGAIN;
out_bad_stateid:
	task->tk_status = -EIO;
	return 0;
wait_on_recovery:
	rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
		rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
	goto out;
}
/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
					   struct pnfs_layout_segment *lseg,
					   int idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	if (task->tk_status >= 0)
		return 0;

	if (task->tk_status != -EJUKEBOX) {
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_mark_deviceid_unavailable(devid);
		if (ff_layout_has_available_ds(lseg))
			return -NFS4ERR_RESET_TO_PNFS;
		else
			return -NFS4ERR_RESET_TO_MDS;
	}

	if (task->tk_status == -EJUKEBOX)
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
	task->tk_status = 0;
	rpc_restart_call(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return -EAGAIN;
}
static int ff_layout_async_handle_error(struct rpc_task *task,
					struct nfs4_state *state,
					struct nfs_client *clp,
					struct pnfs_layout_segment *lseg,
					int idx)
{
	int vers = clp->cl_nfs_mod->rpc_vers->number;

	switch (vers) {
	case 3:
		return ff_layout_async_handle_error_v3(task, lseg, idx);
	case 4:
		return ff_layout_async_handle_error_v4(task, state, clp,
						       lseg, idx);
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}
}
static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
					int idx, u64 offset, u64 length,
					u32 status, int opnum, int error)
{
	struct nfs4_ff_layout_mirror *mirror;
	int err;

	if (status == 0) {
		switch (error) {
		case -ETIMEDOUT:
		case -EPFNOSUPPORT:
		case -EPROTONOSUPPORT:
		case -EOPNOTSUPP:
		case -ECONNREFUSED:
		case -ECONNRESET:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ENETUNREACH:
		case -EADDRINUSE:
		case -ENOBUFS:
		case -EPIPE:
		case -EPERM:
			status = NFS4ERR_NXIO;
			break;
		case -EACCES:
			status = NFS4ERR_ACCESS;
			break;
		default:
			return;
		}
	}

	mirror = FF_LAYOUT_COMP(lseg, idx);
	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				       mirror, offset, length, status, opnum,
				       GFP_NOIO);
	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
}
/* NFS_PROTO call done callback routines */

static int ff_layout_read_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	struct inode *inode;
	int err;

	trace_nfs4_pnfs_read(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_READ,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
			&hdr->lseg->pls_layout->plh_flags);
		pnfs_read_resend_pnfs(hdr);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		inode = hdr->lseg->pls_layout->plh_inode;
		pnfs_error_mark_layout_for_return(inode, hdr->lseg);
		ff_layout_reset_read(hdr);
		return task->tk_status;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	return 0;
}
static bool
ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
{
	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
}
/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * rfc5661 is not clear about which credential should be used.
 *
 * Flexlayout client should treat DS replied FILE_SYNC as DATA_SYNC, so
 * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 * we always send layoutcommit after DS writes.
 */
static void
ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr)
{
	if (!ff_layout_need_layoutcommit(hdr->lseg))
		return;

	pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
			hdr->mds_offset + hdr->res.count);
	dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
		(unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
}
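
/*
 * Worked example (added note): for a WRITE of hdr->res.count = 4096
 * bytes at hdr->mds_offset = 8192, the end position passed to
 * pnfs_set_layoutcommit() above is 8192 + 4096 = 12288, which becomes
 * the layout's plh_lwb high-water mark reported in the dprintk.
 */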
static bool
ff_layout_reset_to_mds(struct pnfs_layout_segment *lseg, int idx)
{
	/* No mirroring for now */
	struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);

	return ff_layout_test_devid_unavailable(node);
}
static int ff_layout_read_prepare_common(struct rpc_task *task,
					 struct nfs_pgio_header *hdr)
{
	nfs4_ff_layout_stat_io_start_read(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);

	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}
	if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
		dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
		if (ff_layout_has_available_ds(hdr->lseg))
			pnfs_read_resend_pnfs(hdr);
		else
			ff_layout_reset_read(hdr);
		rpc_exit(task, 0);
		return -EAGAIN;
	}
	hdr->pgio_done_cb = ff_layout_read_done_cb;

	return 0;
}
/*
 * Call ops for the async read/write cases
 * In the case of dense layouts, the offset needs to be reset to its
 * original value.
 */
static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}
static int ff_layout_setup_sequence(struct nfs_client *ds_clp,
				    struct nfs4_sequence_args *args,
				    struct nfs4_sequence_res *res,
				    struct rpc_task *task)
{
	if (ds_clp->cl_session)
		return nfs41_setup_sequence(ds_clp->cl_session,
					   args,
					   res,
					   task);
	return nfs40_setup_sequence(ds_clp->cl_slot_tbl,
				   args,
				   res,
				   task);
}
static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_setup_sequence(hdr->ds_clp,
				     &hdr->args.seq_args,
				     &hdr->res.seq_res,
				     task))
		return;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_READ) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}
static void ff_layout_read_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);

	nfs4_ff_layout_stat_io_end_read(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count, hdr->res.count);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}
static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
}
static int ff_layout_write_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	struct inode *inode;
	int err;

	trace_nfs4_pnfs_write(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_WRITE,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
	case -NFS4ERR_RESET_TO_MDS:
		inode = hdr->lseg->pls_layout->plh_inode;
		pnfs_error_mark_layout_for_return(inode, hdr->lseg);
		if (err == -NFS4ERR_RESET_TO_PNFS) {
			pnfs_set_retry_layoutget(hdr->lseg->pls_layout);
			ff_layout_reset_write(hdr, true);
		} else {
			pnfs_clear_retry_layoutget(hdr->lseg->pls_layout);
			ff_layout_reset_write(hdr, false);
		}
		return task->tk_status;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
	    hdr->res.verf->committed == NFS_DATA_SYNC)
		ff_layout_set_layoutcommit(hdr);

	/* zero out fattr since we don't care DS attr at all */
	hdr->fattr.valid = 0;
	if (task->tk_status >= 0)
		nfs_writeback_update_inode(hdr);

	return 0;
}
static int ff_layout_commit_done_cb(struct rpc_task *task,
				     struct nfs_commit_data *data)
{
	struct inode *inode;
	int err;

	trace_nfs4_pnfs_commit_ds(data, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
					    data->args.offset, data->args.count,
					    data->res.op_status, OP_COMMIT,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
					   data->lseg, data->ds_commit_index);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
	case -NFS4ERR_RESET_TO_MDS:
		inode = data->lseg->pls_layout->plh_inode;
		pnfs_error_mark_layout_for_return(inode, data->lseg);
		if (err == -NFS4ERR_RESET_TO_PNFS)
			pnfs_set_retry_layoutget(data->lseg->pls_layout);
		else
			pnfs_clear_retry_layoutget(data->lseg->pls_layout);
		pnfs_generic_prepare_to_resend_writes(data);
		return -EAGAIN;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	if (data->verf.committed == NFS_UNSTABLE
	    && ff_layout_need_layoutcommit(data->lseg))
		pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb);

	return 0;
}
static int ff_layout_write_prepare_common(struct rpc_task *task,
					  struct nfs_pgio_header *hdr)
{
	nfs4_ff_layout_stat_io_start_write(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);

	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}

	if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
		bool retry_pnfs;

		retry_pnfs = ff_layout_has_available_ds(hdr->lseg);
		dprintk("%s task %u reset io to %s\n", __func__,
			task->tk_pid, retry_pnfs ? "pNFS" : "MDS");
		ff_layout_reset_write(hdr, retry_pnfs);
		rpc_exit(task, 0);
		return -EAGAIN;
	}

	return 0;
}
static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}
static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_setup_sequence(hdr->ds_clp,
				     &hdr->args.seq_args,
				     &hdr->res.seq_res,
				     task))
		return;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_WRITE) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}
static void ff_layout_write_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count, hdr->res.count,
			hdr->res.verf->committed);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}
static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
}
static void ff_layout_commit_prepare_common(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	nfs4_ff_layout_stat_io_start_write(cdata->inode,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			0, task->tk_start);
}
static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
{
	ff_layout_commit_prepare_common(task, data);
	rpc_call_start(task);
}
static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	if (ff_layout_setup_sequence(wdata->ds_clp,
				     &wdata->args.seq_args,
				     &wdata->res.seq_res,
				     task))
		return;
	ff_layout_commit_prepare_common(task, data);
}
static void ff_layout_commit_done(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;
	struct nfs_page *req;
	__u64 count = 0;

	if (task->tk_status == 0) {
		list_for_each_entry(req, &cdata->pages, wb_list)
			count += req->wb_bytes;
	}

	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			count, count, NFS_FILE_SYNC);

	pnfs_generic_write_commit_done(task, data);
}
static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;

	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
}
static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_read_prepare_v3,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_read_prepare_v4,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_write_prepare_v3,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_write_prepare_v4,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v3,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = pnfs_generic_commit_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v4,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = pnfs_generic_commit_release,
};
static enum pnfs_try_status
ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	loff_t offset = hdr->args.offset;
	u32 idx = hdr->pgio_mirror_idx;
	int vers;
	struct nfs_fh *fh;

	dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
		__func__, hdr->inode->i_ino,
		hdr->args.pgbase, (size_t)hdr->args.count, offset);

	ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
	if (!ds)
		goto out_failed;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		goto out_failed;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
	if (IS_ERR(ds_cred))
		goto out_failed;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
		ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);

	atomic_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
	if (fh)
		hdr->args.fh = fh;

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;
	hdr->mds_offset = offset;

	/* Perform an asynchronous read to ds */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_read_call_ops_v3 :
				      &ff_layout_read_call_ops_v4,
			  0, RPC_TASK_SOFTCONN);

	return PNFS_ATTEMPTED;

out_failed:
	if (ff_layout_has_available_ds(lseg))
		return PNFS_TRY_AGAIN;
	return PNFS_NOT_ATTEMPTED;
}
/* Perform async writes. */
static enum pnfs_try_status
ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	loff_t offset = hdr->args.offset;
	int vers;
	struct nfs_fh *fh;
	int idx = hdr->pgio_mirror_idx;

	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		return PNFS_NOT_ATTEMPTED;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		return PNFS_NOT_ATTEMPTED;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
	if (IS_ERR(ds_cred))
		return PNFS_NOT_ATTEMPTED;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d vers %d\n",
		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
		offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count),
		vers);

	hdr->pgio_done_cb = ff_layout_write_done_cb;
	atomic_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	hdr->ds_commit_idx = idx;
	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
	if (fh)
		hdr->args.fh = fh;

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;

	/* Perform an asynchronous write */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_write_call_ops_v3 :
				      &ff_layout_write_call_ops_v4,
			  sync, RPC_TASK_SOFTCONN);
	return PNFS_ATTEMPTED;
}
static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	return i;
}
static struct nfs_fh *
select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);

	/* FIXME: Assume that there is only one NFS version available
	 * for the DS.
	 */
	return &flseg->mirror_array[i]->fh_versions[0];
}
static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
{
	struct pnfs_layout_segment *lseg = data->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	u32 idx;
	int vers;
	struct nfs_fh *fh;

	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		goto out_err;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   data->inode);
	if (IS_ERR(ds_clnt))
		goto out_err;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
	if (IS_ERR(ds_cred))
		goto out_err;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
		data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count),
		vers);
	data->commit_done_cb = ff_layout_commit_done_cb;
	data->cred = ds_cred;
	atomic_inc(&ds->ds_clp->cl_count);
	data->ds_clp = ds->ds_clp;
	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
	if (fh)
		data->args.fh = fh;

	return nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
					       &ff_layout_commit_call_ops_v4,
				   how, RPC_TASK_SOFTCONN);
out_err:
	pnfs_generic_prepare_to_resend_writes(data);
	pnfs_generic_commit_release(data);
	return -EAGAIN;
}
static int
ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			   int how, struct nfs_commit_info *cinfo)
{
	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
					    ff_layout_initiate_commit);
}
static struct pnfs_ds_commit_info *
ff_layout_get_ds_info(struct inode *inode)
{
	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;

	if (layout == NULL)
		return NULL;

	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
}
static void
ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
						  id_node));
}
static int ff_layout_encode_ioerr(struct nfs4_flexfile_layout *flo,
				  struct xdr_stream *xdr,
				  const struct nfs4_layoutreturn_args *args)
{
	struct pnfs_layout_hdr *hdr = &flo->generic_hdr;
	__be32 *start;
	int count = 0, ret = 0;

	start = xdr_reserve_space(xdr, 4);
	if (unlikely(!start))
		return -E2BIG;

	/* This assumes we always return _ALL_ layouts */
	spin_lock(&hdr->plh_inode->i_lock);
	ret = ff_layout_encode_ds_ioerr(flo, xdr, &count, &args->range);
	spin_unlock(&hdr->plh_inode->i_lock);

	*start = cpu_to_be32(count);

	return ret;
}
/* report nothing for now */
static void ff_layout_encode_iostats(struct nfs4_flexfile_layout *flo,
				     struct xdr_stream *xdr,
				     const struct nfs4_layoutreturn_args *args)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4);
	if (likely(p))
		*p = cpu_to_be32(0);
}
static struct nfs4_deviceid_node *
ff_layout_alloc_deviceid_node(struct nfs_server *server,
			      struct pnfs_device *pdev, gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds *dsaddr;

	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
	if (!dsaddr)
		return NULL;
	return &dsaddr->id_node;
}
static void
ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
			      struct xdr_stream *xdr,
			      const struct nfs4_layoutreturn_args *args)
{
	struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
	__be32 *start;

	dprintk("%s: Begin\n", __func__);
	start = xdr_reserve_space(xdr, 4);
	BUG_ON(!start);

	if (ff_layout_encode_ioerr(flo, xdr, args))
		goto out;

	ff_layout_encode_iostats(flo, xdr, args);
out:
	*start = cpu_to_be32((xdr->p - start - 1) * 4);
	dprintk("%s: Return\n", __func__);
}
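
/*
 * Arithmetic note (added): "(xdr->p - start - 1) * 4" back-patches the
 * opaque length reserved at 'start'.  xdr->p and start are __be32
 * pointers, so their difference counts 4-byte words; subtracting 1
 * excludes the length word itself, and the "* 4" converts words to
 * bytes.
 */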
static size_t
ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
{
	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;

	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
}
static size_t
ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
			  const int buflen)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	const struct in6_addr *addr = &sin6->sin6_addr;

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded ANY address
	 */
	if (ipv6_addr_any(addr))
		return snprintf(buf, buflen, "::");

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded loopback address
	 */
	if (ipv6_addr_loopback(addr))
		return snprintf(buf, buflen, "::1");

	/*
	 * RFC 4291, Section 2.2.3
	 *
	 * Special presentation address format for mapped v4
	 * addresses.
	 */
	if (ipv6_addr_v4mapped(addr))
		return snprintf(buf, buflen, "::ffff:%pI4",
				&addr->s6_addr32[3]);

	/*
	 * RFC 4291, Section 2.2.1
	 */
	return snprintf(buf, buflen, "%pI6c", addr);
}
/* Derived from rpc_sockaddr2uaddr */
static void
ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
{
	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
	char portbuf[RPCBIND_MAXUADDRPLEN];
	char addrbuf[RPCBIND_MAXUADDRLEN];
	char *netid;
	unsigned short port;
	int len, netid_len;
	__be32 *p;

	switch (sap->sa_family) {
	case AF_INET:
		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
		netid = "tcp";
		netid_len = 3;
		break;
	case AF_INET6:
		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
		netid = "tcp6";
		netid_len = 4;
		break;
	default:
		/* we only support tcp and tcp6 */
		WARN_ON_ONCE(1);
		return;
	}

	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));

	p = xdr_reserve_space(xdr, 4 + netid_len);
	xdr_encode_opaque(p, netid, netid_len);

	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, addrbuf, len);
}
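
/*
 * Worked example (added note): the universal address format above
 * splits the port into two decimal octets, so 192.0.2.1 port 2049
 * (0x0801) is emitted as netid "tcp" and uaddr "192.0.2.1.8.1"
 * (2049 >> 8 == 8, 2049 & 0xff == 1), the r_addr form used by rpcbind.
 */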
static void
ff_layout_encode_nfstime(struct xdr_stream *xdr,
			 ktime_t t)
{
	struct timespec64 ts;
	__be32 *p;

	p = xdr_reserve_space(xdr, 12);
	ts = ktime_to_timespec64(t);
	p = xdr_encode_hyper(p, ts.tv_sec);
	*p++ = cpu_to_be32(ts.tv_nsec);
}
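
/*
 * Layout note (added): the 12 bytes reserved above hold an nfstime4-
 * style value: a 64-bit seconds field encoded as a hyper, followed by
 * a 32-bit nanoseconds field -- hence xdr_reserve_space(xdr, 12).
 */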
static void
ff_layout_encode_io_latency(struct xdr_stream *xdr,
			    struct nfs4_ff_io_stat *stat)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 5 * 8);
	p = xdr_encode_hyper(p, stat->ops_requested);
	p = xdr_encode_hyper(p, stat->bytes_requested);
	p = xdr_encode_hyper(p, stat->ops_completed);
	p = xdr_encode_hyper(p, stat->bytes_completed);
	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
}
static void
ff_layout_encode_layoutstats(struct xdr_stream *xdr,
			     struct nfs42_layoutstat_args *args,
			     struct nfs42_layoutstat_devinfo *devinfo)
{
	struct nfs4_ff_layout_mirror *mirror = devinfo->layout_private;
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
	struct nfs_fh *fh = &mirror->fh_versions[0];
	__be32 *p, *start;

	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
	dprintk("%s: DS %s: encoding address %s\n",
		__func__, ds->ds_remotestr, da->da_remotestr);
	/* layoutupdate length */
	start = xdr_reserve_space(xdr, 4);
	/* netaddr4 */
	ff_layout_encode_netaddr(xdr, da);
	/* nfs_fh4 */
	p = xdr_reserve_space(xdr, 4 + fh->size);
	xdr_encode_opaque(p, fh->data, fh->size);
	/* ff_io_latency4 read */
	spin_lock(&mirror->lock);
	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
	/* ff_io_latency4 write */
	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
	spin_unlock(&mirror->lock);
	/* ff_layoutupdate4 */
	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
	/* bool */
	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(false);

	*start = cpu_to_be32((xdr->p - start - 1) * 4);
}
static bool
ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
			       struct pnfs_layout_segment *pls,
			       int *dev_count, int dev_limit)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *dev;
	struct nfs42_layoutstat_devinfo *devinfo;
	int i;

	for (i = 0; i < FF_LAYOUT_MIRROR_COUNT(pls); i++) {
		if (*dev_count >= dev_limit)
			break;
		mirror = FF_LAYOUT_COMP(pls, i);
		if (!mirror || !mirror->mirror_ds)
			continue;
		dev = FF_LAYOUT_DEVID_NODE(pls, i);
		devinfo = &args->devinfo[*dev_count];
		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
		devinfo->offset = pls->pls_range.offset;
		devinfo->length = pls->pls_range.length;
		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
		devinfo->layout_type = LAYOUT_FLEX_FILES;
		devinfo->layoutstats_encode = ff_layout_encode_layoutstats;
		devinfo->layout_private = mirror;
		/* mirror refcount put in cleanup_layoutstats */
		atomic_inc(&mirror->ref);

		++(*dev_count);
	}

	return *dev_count < dev_limit;
}
static int
ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
	struct pnfs_layout_segment *pls;
	int dev_count = 0;

	spin_lock(&args->inode->i_lock);
	list_for_each_entry(pls, &NFS_I(args->inode)->layout->plh_segs, pls_list) {
		dev_count += FF_LAYOUT_MIRROR_COUNT(pls);
	}
	spin_unlock(&args->inode->i_lock);
	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
	if (dev_count > PNFS_LAYOUTSTATS_MAXDEV) {
		dprintk("%s: truncating devinfo to limit (%d:%d)\n",
			__func__, dev_count, PNFS_LAYOUTSTATS_MAXDEV);
		dev_count = PNFS_LAYOUTSTATS_MAXDEV;
	}
	args->devinfo = kmalloc(dev_count * sizeof(*args->devinfo), GFP_KERNEL);
	if (!args->devinfo)
		return -ENOMEM;

	dev_count = 0;
	spin_lock(&args->inode->i_lock);
	list_for_each_entry(pls, &NFS_I(args->inode)->layout->plh_segs, pls_list) {
		if (!ff_layout_mirror_prepare_stats(args, pls, &dev_count,
						    PNFS_LAYOUTSTATS_MAXDEV)) {
			break;
		}
	}
	spin_unlock(&args->inode->i_lock);
	args->num_dev = dev_count;

	return 0;
}
static void
ff_layout_cleanup_layoutstats(struct nfs42_layoutstat_data *data)
{
	struct nfs4_ff_layout_mirror *mirror;
	int i;

	for (i = 0; i < data->args.num_dev; i++) {
		mirror = data->args.devinfo[i].layout_private;
		data->args.devinfo[i].layout_private = NULL;
		ff_layout_put_mirror(mirror);
	}
}
static struct pnfs_layoutdriver_type flexfilelayout_type = {
	.id			= LAYOUT_FLEX_FILES,
	.name			= "LAYOUT_FLEX_FILES",
	.owner			= THIS_MODULE,
	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
	.free_layout_hdr	= ff_layout_free_layout_hdr,
	.alloc_lseg		= ff_layout_alloc_lseg,
	.free_lseg		= ff_layout_free_lseg,
	.pg_read_ops		= &ff_layout_pg_read_ops,
	.pg_write_ops		= &ff_layout_pg_write_ops,
	.get_ds_info		= ff_layout_get_ds_info,
	.free_deviceid_node	= ff_layout_free_deviceid_node,
	.mark_request_commit	= pnfs_layout_mark_request_commit,
	.clear_request_commit	= pnfs_generic_clear_request_commit,
	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
	.commit_pagelist	= ff_layout_commit_pagelist,
	.read_pagelist		= ff_layout_read_pagelist,
	.write_pagelist		= ff_layout_write_pagelist,
	.alloc_deviceid_node	= ff_layout_alloc_deviceid_node,
	.encode_layoutreturn	= ff_layout_encode_layoutreturn,
	.sync			= pnfs_nfs_generic_sync,
	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
	.cleanup_layoutstats	= ff_layout_cleanup_layoutstats,
};
static int __init nfs4flexfilelayout_init(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
	       __func__);
	return pnfs_register_layoutdriver(&flexfilelayout_type);
}

static void __exit nfs4flexfilelayout_exit(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
	       __func__);
	pnfs_unregister_layoutdriver(&flexfilelayout_type);
}

MODULE_ALIAS("nfs-layouttype4-4");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");

module_init(nfs4flexfilelayout_init);
module_exit(nfs4flexfilelayout_exit);