/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */
#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char transfertypes[][12] = {
	"pure inline",	/* no chunks */
	" read chunk",	/* some argument via rdma read */
	"*read chunk",	/* entire request via rdma read */
	"write chunk",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
#endif
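/* Note: this table is indexed by the rpcrdma_chunktype values (see
 * transfertypes[req->rl_wtype] in rpcrdma_marshal_req() below), so its
 * entries must stay in the same order as that enum's values. */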
/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
 *
 * Returns positive number of segments converted, or a negative errno.
 */
static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
	int len, n = 0, p;
	int page_base;
	struct page **ppages;

	if (pos == 0 && xdrbuf->head[0].iov_len) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->head[0].iov_base;
		seg[n].mr_len = xdrbuf->head[0].iov_len;
		++n;
	}

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = xdrbuf->page_base & ~PAGE_MASK;
	p = 0;
	while (len && n < nsegs) {
		if (!ppages[p]) {
			/* alloc the pagelist for receiving buffer */
			ppages[p] = alloc_page(GFP_ATOMIC);
			if (!ppages[p])
				return -ENOMEM;
		}
		seg[n].mr_page = ppages[p];
		seg[n].mr_offset = (void *)(unsigned long) page_base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		if (seg[n].mr_len > PAGE_SIZE)
			return -EIO;
		len -= seg[n].mr_len;
		++n;
		++p;
		page_base = 0;	/* page offset only applies to first page */
	}

	/* Message overflows the seg array */
	if (len && n == nsegs)
		return -EIO;

	if (xdrbuf->tail[0].iov_len) {
		/* the rpcrdma protocol allows us to omit any trailing
		 * xdr pad bytes, saving the server an RDMA operation. */
		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
			return n;
		if (n == nsegs)
			/* Tail remains, but we're out of segments */
			return -EIO;
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->tail[0].iov_base;
		seg[n].mr_len = xdrbuf->tail[0].iov_len;
		++n;
	}

	return n;
}
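/* Worked example (illustrative, not from the original source, assuming
 * 4096-byte pages): an xdr_buf with a 120-byte head, 6000 bytes of page
 * data starting at page offset 3000, a 2-byte tail of XDR padding, and
 * pos == 0 converts to:
 *
 *	seg[0]: mr_page = NULL,     mr_offset = head iov_base, mr_len = 120
 *	seg[1]: mr_page = pages[0], mr_offset = 3000,          mr_len = 1096
 *	seg[2]: mr_page = pages[1], mr_offset = 0,             mr_len = 4096
 *	seg[3]: mr_page = pages[2], mr_offset = 0,             mr_len = 808
 *
 * With xprt_rdma_pad_optimize set, the short tail is omitted and the
 * function returns 4.
 */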
/*
 * Create read/write chunk lists, and reply chunks, for RDMA
 *
 * Assume check against THRESHOLD has been done, and chunks are required.
 * Assume only encoding one list entry for read|write chunks. The NFSv3
 * protocol is simple enough to allow this as it only has a single "bulk
 * result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
 * RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
 *
 * When used for a single reply chunk (which is a special write
 * chunk used for the entire reply, rather than just the data), it
 * is used primarily for READDIR and READLINK which would otherwise
 * be severely size-limited by a small rdma inline read max. The server
 * response will come back as an RDMA Write, followed by a message
 * of type RDMA_NOMSG carrying the xid and length. As a result, reply
 * chunks do not provide data alignment; however, they do not require
 * "fixup" (moving the response to the upper layer buffer) either.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 * Read chunklist (a linked list):
 *  N elements, position P (same P for all chunks of same arg!):
 *   1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Write chunklist (a list of (one) counted array):
 *  N elements:
 *   1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Reply chunk (a counted array):
 *  N elements:
 *   1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns positive RPC/RDMA header size, or negative errno.
 */
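/* Worked example of the encoding key above (illustrative only): a read
 * list carrying two chunks for an argument at XDR position 20, followed
 * by the NULL write and reply lists, is this sequence of 32-bit words:
 *
 *	1, 20, H1, L1, O1hi, O1lo,	first read list entry
 *	1, 20, H2, L2, O2hi, O2lo,	second read list entry
 *	0,				end of read list
 *	0,				NULL write chunk list
 *	0				NULL reply chunk
 */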
static int
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
	struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int n, nsegs, nchunks = 0;
	unsigned int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	__be32 *iptr = headerp->rm_body.rm_chunks;

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs < 0)
		return nsegs;

	do {
		n = rpcrdma_register_external(seg, nsegs,
						cur_wchunk != NULL, r_xprt);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks have the same "position" */
			cur_rchunk->rc_position = htonl(pos);
			cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_rchunk->rc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {	/* write/reply */
			cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_wchunk->wc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* success. all failures return above */
	req->rl_nchunks = nchunks;

	/*
	 * finish off header. If write, marshal discrim and nchunks.
	 */
	if (cur_rchunk) {
		iptr = (__be32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = htonl(nchunks);
		iptr = (__be32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero;	/* finish the write chunk list */
			*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
		}
	}

	/*
	 * Return header size.
	 */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	if (r_xprt->rx_ia.ri_memreg_strategy != RPCRDMA_FRMR) {
		for (pos = 0; nchunks--;)
			pos += rpcrdma_deregister_external(
					&req->rl_segments[pos], r_xprt);
	}
	return n;
}
/*
 * Marshal chunks. This routine returns the header length
 * consumed by marshaling.
 *
 * Returns positive RPC/RDMA header size, or negative errno.
 */
ssize_t
rpcrdma_marshal_chunks(struct rpc_rqst *rqst, ssize_t result)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_msg *headerp = (struct rpcrdma_msg *)req->rl_base;

	if (req->rl_rtype != rpcrdma_noch)
		result = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
					       headerp, req->rl_rtype);
	else if (req->rl_wtype != rpcrdma_noch)
		result = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
					       headerp, req->rl_wtype);
	return result;
}
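/* Illustrative call pattern (see rpcrdma_marshal_req() below): the caller
 * passes the fixed 28-byte header size as "result"; if a chunk list is
 * needed, rpcrdma_create_chunks() replaces it with the full header size,
 * otherwise the passed-in value is returned unchanged:
 *
 *	hdrlen = rpcrdma_marshal_chunks(rqst, hdrlen);
 *	if (hdrlen < 0)
 *		return hdrlen;
 */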
/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * to RPC via iovecs (or page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
 */
static int
rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int page_base;
	struct page **ppages;

	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;
	/*
	 * Do optional padding where it makes sense. Alignment of write
	 * payload can help the server, if our setting is accurate.
	 */
	pad -= (curlen + 36/*sizeof(struct rpcrdma_msg_padded)*/);
	if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
		pad = 0;	/* don't pad this request */

	dprintk("RPC: %s: pad %d destp 0x%p len %d hdrlen %d\n",
		__func__, pad, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;

	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
			memmove(destp + copy_len,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC: %s: tail destp 0x%p len %d\n",
			__func__, destp + copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}

	r_xprt->rx_stats.pullup_copy_count += copy_len;

	page_base = rqst->rq_snd_buf.page_base;
	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;
	npages = PAGE_ALIGN(page_base + copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		curlen = PAGE_SIZE - page_base;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(ppages[i]);
		memcpy(destp, srcp + page_base, curlen);
		kunmap_atomic(srcp);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
		page_base = 0;
	}
	/* header now contains entire send message */
	return pad;
}
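/* Illustrative outcome (hypothetical sizes, not from the original source):
 * with a 512-byte RPC header in rq_svec[0] and 3000 bytes of page data at
 * page offset 0, the loop above copies all 3000 bytes to just past the
 * header, rq_svec[0].iov_len grows to 3512, and the pad value computed at
 * the top of the function is returned unchanged. */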
/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Uses multiple RDMA IOVs for a request:
 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 *         preregistered buffer that already holds the RPC data in
 *         its middle.
 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 *  [2] -- optional padding.
 *  [3] -- if padded, header only in [1] and data here.
 *
 * Returns zero on success, otherwise a negative errno.
 */
int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t rpclen, padlen;
	ssize_t hdrlen;
	struct rpcrdma_msg *headerp;

	/*
	 * rpclen gets amount of data in first buffer, which is the
	 * pre-registered buffer.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	/* build RDMA header in private area at front */
	headerp = (struct rpcrdma_msg *) req->rl_base;
	/* don't htonl XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = xdr_one;
	headerp->rm_credit = htonl(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = htonl(RDMA_MSG);
	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline (but see later).
	 * o Large non-read ops return as a single reply chunk.
	 * o Large read ops return data as write chunk(s), header as inline.
	 *
	 * Note: the NFS code sending down multiple result segments implies
	 * the op is one of read, readdir[plus], readlink or NFSv4 getacl.
	 */

	/*
	 * This code can handle read chunks, write chunks OR reply
	 * chunks -- only one type. If the request is too big to fit
	 * inline, then we will choose read chunks. If the request is
	 * a READ, then use write chunks to separate the file data
	 * into pages; otherwise use reply chunks.
	 */
	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
		req->rl_wtype = rpcrdma_noch;
	else if (rqst->rq_rcv_buf.page_len == 0)
		req->rl_wtype = rpcrdma_replych;
	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		req->rl_wtype = rpcrdma_writech;
	else
		req->rl_wtype = rpcrdma_replych;
	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 *
	 * Note: the NFS code sending down multiple argument segments
	 * implies the op is a write.
	 * TBD check NFSv4 setacl
	 */
	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
		req->rl_rtype = rpcrdma_noch;
	else if (rqst->rq_snd_buf.page_len == 0)
		req->rl_rtype = rpcrdma_areadch;
	else
		req->rl_rtype = rpcrdma_readch;

	/* The following simplification is not true forever */
	if (req->rl_rtype != rpcrdma_noch && req->rl_wtype == rpcrdma_replych)
		req->rl_wtype = rpcrdma_noch;
	if (req->rl_rtype != rpcrdma_noch && req->rl_wtype != rpcrdma_noch) {
		dprintk("RPC: %s: cannot marshal multiple chunk lists\n",
			__func__);
		return -EIO;
	}

	hdrlen = 28; /*sizeof *headerp;*/
	padlen = 0;
	/*
	 * Pull up any extra send data into the preregistered buffer.
	 * When padding is in use and applies to the transfer, insert
	 * it and change the message type.
	 */
	if (req->rl_rtype == rpcrdma_noch) {

		padlen = rpcrdma_inline_pullup(rqst,
						RPCRDMA_INLINE_PAD_VALUE(rqst));

		if (padlen) {
			headerp->rm_type = htonl(RDMA_MSGP);
			headerp->rm_body.rm_padded.rm_align =
				htonl(RPCRDMA_INLINE_PAD_VALUE(rqst));
			headerp->rm_body.rm_padded.rm_thresh =
				htonl(RPCRDMA_INLINE_PAD_THRESH);
			headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
			hdrlen += 2 * sizeof(u32);	/* extra words in padhdr */
			if (req->rl_wtype != rpcrdma_noch) {
				dprintk("RPC: %s: invalid chunk list\n",
					__func__);
				return -EIO;
			}
		} else {
			headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
			/* new length after pullup */
			rpclen = rqst->rq_svec[0].iov_len;
			/*
			 * Currently we try to not actually use read inline.
			 * Reply chunks have the desirable property that
			 * they land, packed, directly in the target buffers
			 * without headers, so they require no fixup. The
			 * additional RDMA Write op sends the same amount
			 * of data, streams on-the-wire and adds no overhead
			 * on receive. Therefore, we request a reply chunk
			 * for non-writes wherever feasible and efficient.
			 */
			if (req->rl_wtype == rpcrdma_noch)
				req->rl_wtype = rpcrdma_replych;
		}
	}
	hdrlen = rpcrdma_marshal_chunks(rqst, hdrlen);
	if (hdrlen < 0)
		return hdrlen;

	dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[req->rl_wtype], hdrlen, rpclen, padlen,
		headerp, base, req->rl_iov.lkey);
	/*
	 * initialize send_iov's - normally only two: rdma chunk header and
	 * single preregistered RPC header buffer, but if padding is present,
	 * then use a preregistered (and zeroed) pad buffer between the RPC
	 * header and any write data. In all non-rdma cases, any following
	 * data has been copied into the RPC header buffer.
	 */
	req->rl_send_iov[0].addr = req->rl_iov.addr;
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = req->rl_iov.lkey;

	req->rl_send_iov[1].addr = req->rl_iov.addr + (base - req->rl_base);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = req->rl_iov.lkey;

	req->rl_niovs = 2;

	if (padlen) {
		struct rpcrdma_ep *ep = &r_xprt->rx_ep;

		req->rl_send_iov[2].addr = ep->rep_pad.addr;
		req->rl_send_iov[2].length = padlen;
		req->rl_send_iov[2].lkey = ep->rep_pad.lkey;

		req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
		req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
		req->rl_send_iov[3].lkey = req->rl_iov.lkey;

		req->rl_niovs = 4;
	}

	return 0;
}
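/* Illustrative send buffer layout (hypothetical sizes): for a padded
 * request with hdrlen = 36, rpclen = 128 and padlen = 4, the four send
 * iovs cover, in order:
 *
 *	[0] RPC/RDMA header, 36 bytes at rl_iov.addr
 *	[1] RPC header, 128 bytes at the request's offset in the buffer
 *	[2] zeroed pad buffer, 4 bytes from ep->rep_pad
 *	[3] write data, rq_slen - 128 bytes following iov [1]
 */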
/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;

	i = ntohl(**iptrp);	/* get array count */
	if (i > max)
		return -1;
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				ntohl(seg->rs_length),
				(unsigned long long)off,
				ntohl(seg->rs_handle));
		}
		total_len += ntohl(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *) cur_wchunk > rep->rr_base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}
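/* Worked example (illustrative only): a properly terminated write chunk
 * list holding two segments of 1024 and 512 bytes decodes from
 *
 *	2, H1, 1024, O1hi, O1lo, H2, 512, O2hi, O2lo, 0
 *
 * and returns 1536, with *iptrp left pointing just past the terminating
 * zero word.
 */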
/*
 * Scatter inline received data back into provided iov's.
 */
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;
	struct page **ppages;
	int page_base;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;

	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	page_base = rqst->rq_rcv_buf.page_base;
	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;

	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(page_base +
				rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC: %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
			page_base = 0;
		}
	}

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC: %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC: %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}
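/* Illustrative distribution (hypothetical sizes): with a 64-byte head, a
 * single 4096-byte receive page, and a tail of at least 4 bytes, an inline
 * reply of copy_len = 4164 lands as 64 bytes in head[0] (whose iov_base is
 * shifted to srcp), 4096 bytes copied into the page, and 4 bytes moved
 * into tail[0]. */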
void
rpcrdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_ep *ep =
		container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
	struct rpc_xprt *xprt = ep->rep_xprt;

	spin_lock_bh(&xprt->transport_lock);
	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
		++xprt->connect_cookie;
	if (ep->rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt))
			xprt_wake_pending_tasks(xprt, 0);
	} else {
		if (xprt_test_and_clear_connected(xprt))
			xprt_wake_pending_tasks(xprt, -ENOTCONN);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
/*
 * This function is called when an async event is posted to
 * the connection which changes the connection state. All it
 * does at this point is mark the connection up/down, the rpc
 * timers do the rest.
 */
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
	schedule_delayed_work(&ep->rep_connect_worker, 0);
}
/*
 * Called as a tasklet to do req/reply match and complete a request.
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpc_xprt *xprt = rep->rr_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	__be32 *iptr;
	int rdmalen, status;
	unsigned long cwnd;
	/* Check status. If bad, signal disconnect and return rep to pool */
	if (rep->rr_len == ~0U) {
		rpcrdma_recv_buffer_put(rep);
		if (r_xprt->rx_ep.rep_connected == 1) {
			r_xprt->rx_ep.rep_connected = -EIO;
			rpcrdma_conn_func(&r_xprt->rx_ep);
		}
		return;
	}
	if (rep->rr_len < 28) {
		dprintk("RPC: %s: short/invalid reply\n", __func__);
		goto repost;
	}
	headerp = (struct rpcrdma_msg *) rep->rr_base;
	if (headerp->rm_vers != xdr_one) {
		dprintk("RPC: %s: invalid version %d\n",
			__func__, ntohl(headerp->rm_vers));
		goto repost;
	}
	/* Get XID and try for a match. */
	spin_lock(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (rqst == NULL) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC: %s: reply 0x%p failed "
			"to match any request xid 0x%08x len %d\n",
			__func__, rep, headerp->rm_xid, rep->rr_len);
repost:
		r_xprt->rx_stats.bad_reply_count++;
		rep->rr_func = rpcrdma_reply_handler;
		if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
			rpcrdma_recv_buffer_put(rep);

		return;
	}
	/* get request object */
	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC: %s: duplicate reply 0x%p to RPC "
			"request 0x%p: xid 0x%08x\n", __func__, rep, req,
			headerp->rm_xid);
		goto repost;
	}

	dprintk("RPC: %s: reply 0x%p completes request 0x%p\n"
		"    RPC request 0x%p xid 0x%08x\n",
		__func__, rep, req, rqst, headerp->rm_xid);
	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;
	xprt->reestablish_timeout = 0;
	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case htonl(RDMA_MSG):
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp + 28);
			rep->rr_len -= 28; /*sizeof *headerp;*/
			status = rep->rr_len;
		}
		/* Fix up the rpc results for upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;
	case htonl(RDMA_NOMSG):
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp + 28);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;
badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
			" chunks[012] == %d %d %d"
			" expected chunks <= %d\n",
			__func__, ntohl(headerp->rm_type),
			headerp->rm_body.rm_chunks[0],
			headerp->rm_body.rm_chunks[1],
			headerp->rm_body.rm_chunks[2],
			req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}
	cwnd = xprt->cwnd;
	xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
		__func__, xprt, rqst, status);
	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock(&xprt->transport_lock);
}