svcrdma: Find rmsgp more reliably
net/sunrpc/xprtrdma/rpc_rdma.c
/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where the
 * interface to the Linux RPC framework lives.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char transfertypes[][12] = {
        "pure inline",  /* no chunks */
        " read chunk",  /* some argument via rdma read */
        "*read chunk",  /* entire request via rdma read */
        "write chunk",  /* some result via rdma write */
        "reply chunk"   /* entire reply via rdma write */
};
#endif

/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
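 *
 * Worked example (a hypothetical layout, for illustration only): an
 * xdr_buf with a 100-byte head, 8192 bytes of page data starting at
 * page offset 200, and a 4-byte tail becomes five segments on a
 * 4KB-page system: one for the head, three for the page data (3896,
 * 4096, and 200 bytes), and one for the tail.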
 *
 * Returns positive number of segments converted, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
        enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
        int len, n = 0, p;
        int page_base;
        struct page **ppages;

        if (pos == 0 && xdrbuf->head[0].iov_len) {
                seg[n].mr_page = NULL;
                seg[n].mr_offset = xdrbuf->head[0].iov_base;
                seg[n].mr_len = xdrbuf->head[0].iov_len;
                ++n;
        }

        len = xdrbuf->page_len;
        ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
        page_base = xdrbuf->page_base & ~PAGE_MASK;
        p = 0;
        while (len && n < nsegs) {
                if (!ppages[p]) {
                        /* alloc the pagelist for receiving buffer */
                        ppages[p] = alloc_page(GFP_ATOMIC);
                        if (!ppages[p])
                                return -ENOMEM;
                }
                seg[n].mr_page = ppages[p];
                seg[n].mr_offset = (void *)(unsigned long) page_base;
                seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
                if (seg[n].mr_len > PAGE_SIZE)
                        return -EIO;
                len -= seg[n].mr_len;
                ++n;
                ++p;
                page_base = 0;  /* page offset only applies to first page */
        }

        /* Message overflows the seg array */
        if (len && n == nsegs)
                return -EIO;

        if (xdrbuf->tail[0].iov_len) {
                /* the rpcrdma protocol allows us to omit any trailing
                 * xdr pad bytes, saving the server an RDMA operation. */
                if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
                        return n;
                if (n == nsegs)
                        /* Tail remains, but we're out of segments */
                        return -EIO;
                seg[n].mr_page = NULL;
                seg[n].mr_offset = xdrbuf->tail[0].iov_base;
                seg[n].mr_len = xdrbuf->tail[0].iov_len;
                ++n;
        }

        return n;
}

/*
 * Create read/write chunk lists, and reply chunks, for RDMA
 *
 * Assume check against THRESHOLD has been done, and chunks are required.
 * Assume only encoding one list entry for read|write chunks. The NFSv3
 * protocol is simple enough to allow this as it only has a single "bulk
 * result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
 * RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
 *
 * When used for a single reply chunk (which is a special write
 * chunk used for the entire reply, rather than just the data), it
 * is used primarily for READDIR and READLINK which would otherwise
 * be severely size-limited by a small rdma inline read max. The server
 * response will come back as an RDMA Write, followed by a message
 * of type RDMA_NOMSG carrying the xid and length. As a result, reply
 * chunks do not provide data alignment; however, they do not require
 * "fixup" (moving the response to the upper layer buffer) either.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 * Read chunklist (a linked list):
 *  N elements, position P (same P for all chunks of same arg!):
 *   1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Write chunklist (a list of (one) counted array):
 *  N elements:
 *   1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Reply chunk (a counted array):
 *  N elements:
 *   1 - N - HLOO - HLOO - ... - HLOO
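 *
 * For example (an illustrative layout, not taken from a trace): a
 * read list carrying two segments at position 20 encodes as thirteen
 * XDR words: 1, 20, H, L, O, O, 1, 20, H, L, O, O, 0 -- each segment
 * preceded by its discriminator and position, with a final 0 closing
 * the list.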
 *
 * Returns positive RPC/RDMA header size, or negative errno.
 */

static ssize_t
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
        struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
        int n, nsegs, nchunks = 0;
        unsigned int pos;
        struct rpcrdma_mr_seg *seg = req->rl_segments;
        struct rpcrdma_read_chunk *cur_rchunk = NULL;
        struct rpcrdma_write_array *warray = NULL;
        struct rpcrdma_write_chunk *cur_wchunk = NULL;
        __be32 *iptr = headerp->rm_body.rm_chunks;

        if (type == rpcrdma_readch || type == rpcrdma_areadch) {
                /* a read chunk - server will RDMA Read our memory */
                cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
        } else {
                /* a write or reply chunk - server will RDMA Write our memory */
                *iptr++ = xdr_zero;     /* encode a NULL read chunk list */
                if (type == rpcrdma_replych)
                        *iptr++ = xdr_zero;     /* a NULL write chunk list */
                warray = (struct rpcrdma_write_array *) iptr;
                cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
        }

        if (type == rpcrdma_replych || type == rpcrdma_areadch)
                pos = 0;
        else
                pos = target->head[0].iov_len;

        nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
        if (nsegs < 0)
                return nsegs;

        do {
                n = rpcrdma_register_external(seg, nsegs,
                                              cur_wchunk != NULL, r_xprt);
                if (n <= 0)
                        goto out;
                if (cur_rchunk) {       /* read */
                        cur_rchunk->rc_discrim = xdr_one;
                        /* all read chunks have the same "position" */
                        cur_rchunk->rc_position = htonl(pos);
                        cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
                        cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
                        xdr_encode_hyper(
                                (__be32 *)&cur_rchunk->rc_target.rs_offset,
                                seg->mr_base);
                        dprintk("RPC: %s: read chunk "
                                "elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
                                seg->mr_len, (unsigned long long)seg->mr_base,
                                seg->mr_rkey, pos, n < nsegs ? "more" : "last");
                        cur_rchunk++;
                        r_xprt->rx_stats.read_chunk_count++;
                } else {                /* write/reply */
                        cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
                        cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
                        xdr_encode_hyper(
                                (__be32 *)&cur_wchunk->wc_target.rs_offset,
                                seg->mr_base);
                        dprintk("RPC: %s: %s chunk "
                                "elem %d@0x%llx:0x%x (%s)\n", __func__,
                                (type == rpcrdma_replych) ? "reply" : "write",
                                seg->mr_len, (unsigned long long)seg->mr_base,
                                seg->mr_rkey, n < nsegs ? "more" : "last");
                        cur_wchunk++;
                        if (type == rpcrdma_replych)
                                r_xprt->rx_stats.reply_chunk_count++;
                        else
                                r_xprt->rx_stats.write_chunk_count++;
                        r_xprt->rx_stats.total_rdma_request += seg->mr_len;
                }
                nchunks++;
                seg += n;
                nsegs -= n;
        } while (nsegs);

        /* success. all failures return above */
        req->rl_nchunks = nchunks;

        /*
         * finish off header. If write, marshal discrim and nchunks.
         */
        if (cur_rchunk) {
                iptr = (__be32 *) cur_rchunk;
                *iptr++ = xdr_zero;     /* finish the read chunk list */
                *iptr++ = xdr_zero;     /* encode a NULL write chunk list */
                *iptr++ = xdr_zero;     /* encode a NULL reply chunk */
        } else {
                warray->wc_discrim = xdr_one;
                warray->wc_nchunks = htonl(nchunks);
                iptr = (__be32 *) cur_wchunk;
                if (type == rpcrdma_writech) {
                        *iptr++ = xdr_zero;     /* finish the write chunk list */
                        *iptr++ = xdr_zero;     /* encode a NULL reply chunk */
                }
        }

        /*
         * Return header size.
         */
        return (unsigned char *)iptr - (unsigned char *)headerp;

out:
        if (r_xprt->rx_ia.ri_memreg_strategy != RPCRDMA_FRMR) {
                for (pos = 0; nchunks--;)
                        pos += rpcrdma_deregister_external(
                                        &req->rl_segments[pos], r_xprt);
        }
        return n;
}

/*
 * Marshal chunks. This routine returns the header length
 * consumed by marshaling.
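 *
 * If neither a read list nor a write list is needed, the passed-in
 * value of "result" (the fixed header length already computed by the
 * caller) is returned unchanged.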
 *
 * Returns positive RPC/RDMA header size, or negative errno.
 */

ssize_t
rpcrdma_marshal_chunks(struct rpc_rqst *rqst, ssize_t result)
{
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        struct rpcrdma_msg *headerp = (struct rpcrdma_msg *)req->rl_base;

        if (req->rl_rtype != rpcrdma_noch)
                result = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
                                               headerp, req->rl_rtype);
        else if (req->rl_wtype != rpcrdma_noch)
                result = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
                                               headerp, req->rl_wtype);
        return result;
}

/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * to RPC via iovecs (or page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
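 *
 * (The caller, rpcrdma_marshal_req(), takes this path only when the
 * whole send buffer fits under RPCRDMA_INLINE_WRITE_THRESHOLD(rqst).)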
 */
static int
rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
{
        int i, npages, curlen;
        int copy_len;
        unsigned char *srcp, *destp;
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
        int page_base;
        struct page **ppages;

        destp = rqst->rq_svec[0].iov_base;
        curlen = rqst->rq_svec[0].iov_len;
        destp += curlen;
        /*
         * Do optional padding where it makes sense. Alignment of write
         * payload can help the server, if our setting is accurate.
         */
        pad -= (curlen + 36/*sizeof(struct rpcrdma_msg_padded)*/);
        if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
                pad = 0;        /* don't pad this request */

        dprintk("RPC: %s: pad %d destp 0x%p len %d hdrlen %d\n",
                __func__, pad, destp, rqst->rq_slen, curlen);

        copy_len = rqst->rq_snd_buf.page_len;

        if (rqst->rq_snd_buf.tail[0].iov_len) {
                curlen = rqst->rq_snd_buf.tail[0].iov_len;
                if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
                        memmove(destp + copy_len,
                                rqst->rq_snd_buf.tail[0].iov_base, curlen);
                        r_xprt->rx_stats.pullup_copy_count += curlen;
                }
                dprintk("RPC: %s: tail destp 0x%p len %d\n",
                        __func__, destp + copy_len, curlen);
                rqst->rq_svec[0].iov_len += curlen;
        }
        r_xprt->rx_stats.pullup_copy_count += copy_len;

        page_base = rqst->rq_snd_buf.page_base;
        ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
        page_base &= ~PAGE_MASK;
        npages = PAGE_ALIGN(page_base + copy_len) >> PAGE_SHIFT;
        for (i = 0; copy_len && i < npages; i++) {
                curlen = PAGE_SIZE - page_base;
                if (curlen > copy_len)
                        curlen = copy_len;
                dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
                        __func__, i, destp, copy_len, curlen);
                srcp = kmap_atomic(ppages[i]);
                memcpy(destp, srcp + page_base, curlen);
                kunmap_atomic(srcp);
                rqst->rq_svec[0].iov_len += curlen;
                destp += curlen;
                copy_len -= curlen;
                page_base = 0;
        }
        /* header now contains entire send message */
        return pad;
}

/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Uses multiple RDMA IOVs for a request:
 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 *         preregistered buffer that already holds the RPC data in
 *         its middle.
 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 *  [2] -- optional padding.
 *  [3] -- if padded, header only in [1] and data here.
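 *
 * An unpadded request posts only [0] and [1] (rl_niovs == 2); when
 * padding applies, all four IOVs are used (rl_niovs == 4).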
 *
 * Returns zero on success, otherwise a negative errno.
 */

int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
        struct rpc_xprt *xprt = rqst->rq_xprt;
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        char *base;
        size_t rpclen, padlen;
        ssize_t hdrlen;
        struct rpcrdma_msg *headerp;

        /*
         * rpclen gets amount of data in first buffer, which is the
         * pre-registered buffer.
         */
        base = rqst->rq_svec[0].iov_base;
        rpclen = rqst->rq_svec[0].iov_len;

        /* build RDMA header in private area at front */
        headerp = (struct rpcrdma_msg *) req->rl_base;
        /* don't htonl XID, it's already done in request */
        headerp->rm_xid = rqst->rq_xid;
        headerp->rm_vers = xdr_one;
        headerp->rm_credit = htonl(r_xprt->rx_buf.rb_max_requests);
        headerp->rm_type = htonl(RDMA_MSG);

        /*
         * Chunks needed for results?
         *
         * o If the expected result is under the inline threshold, all ops
         *   return as inline (but see later).
         * o Large non-read ops return as a single reply chunk.
         * o Large read ops return data as write chunk(s), header as inline.
         *
         * Note: the NFS code sending down multiple result segments implies
         * the op is one of read, readdir[plus], readlink or NFSv4 getacl.
         */

        /*
         * This code can handle read chunks, write chunks OR reply
         * chunks -- only one type. If the request is too big to fit
         * inline, then we will choose read chunks. If the request is
         * a READ, then use write chunks to separate the file data
         * into pages; otherwise use reply chunks.
         */
        if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
                req->rl_wtype = rpcrdma_noch;
        else if (rqst->rq_rcv_buf.page_len == 0)
                req->rl_wtype = rpcrdma_replych;
        else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
                req->rl_wtype = rpcrdma_writech;
        else
                req->rl_wtype = rpcrdma_replych;
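
        /* Illustration (not an exhaustive list): a large NFS READ
         * reply sets XDRBUF_READ on rq_rcv_buf and therefore selects
         * rpcrdma_writech above, while a large READDIR reply, which
         * lacks that flag, falls through to rpcrdma_replych.
         */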

        /*
         * Chunks needed for arguments?
         *
         * o If the total request is under the inline threshold, all ops
         *   are sent as inline.
         * o Large non-write ops are sent with the entire message as a
         *   single read chunk (protocol 0-position special case).
         * o Large write ops transmit data as read chunk(s), header as
         *   inline.
         *
         * Note: the NFS code sending down multiple argument segments
         * implies the op is a write.
         * TBD check NFSv4 setacl
         */
        if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
                req->rl_rtype = rpcrdma_noch;
        else if (rqst->rq_snd_buf.page_len == 0)
                req->rl_rtype = rpcrdma_areadch;
        else
                req->rl_rtype = rpcrdma_readch;

        /* The following simplification is not true forever */
        if (req->rl_rtype != rpcrdma_noch && req->rl_wtype == rpcrdma_replych)
                req->rl_wtype = rpcrdma_noch;
        if (req->rl_rtype != rpcrdma_noch && req->rl_wtype != rpcrdma_noch) {
                dprintk("RPC: %s: cannot marshal multiple chunk lists\n",
                        __func__);
                return -EIO;
        }

        hdrlen = 28;    /* 28 == fixed header (xid, vers, credit, type)
                         * plus three empty chunk lists: 7 XDR words */
        padlen = 0;

        /*
         * Pull up any extra send data into the preregistered buffer.
         * When padding is in use and applies to the transfer, insert
         * it and change the message type.
         */
        if (req->rl_rtype == rpcrdma_noch) {

                padlen = rpcrdma_inline_pullup(rqst,
                                               RPCRDMA_INLINE_PAD_VALUE(rqst));

                if (padlen) {
                        headerp->rm_type = htonl(RDMA_MSGP);
                        headerp->rm_body.rm_padded.rm_align =
                                htonl(RPCRDMA_INLINE_PAD_VALUE(rqst));
                        headerp->rm_body.rm_padded.rm_thresh =
                                htonl(RPCRDMA_INLINE_PAD_THRESH);
                        headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
                        headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
                        headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
                        hdrlen += 2 * sizeof(u32); /* extra words in padhdr */
                        if (req->rl_wtype != rpcrdma_noch) {
                                dprintk("RPC: %s: invalid chunk list\n",
                                        __func__);
                                return -EIO;
                        }
                } else {
                        headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
                        headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
                        headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
                        /* new length after pullup */
                        rpclen = rqst->rq_svec[0].iov_len;
                        /*
                         * Currently we try to not actually use read inline.
                         * Reply chunks have the desirable property that
                         * they land, packed, directly in the target buffers
                         * without headers, so they require no fixup. The
                         * additional RDMA Write op sends the same amount
                         * of data, streams on-the-wire and adds no overhead
                         * on receive. Therefore, we request a reply chunk
                         * for non-writes wherever feasible and efficient.
                         */
                        if (req->rl_wtype == rpcrdma_noch)
                                req->rl_wtype = rpcrdma_replych;
                }
        }

        hdrlen = rpcrdma_marshal_chunks(rqst, hdrlen);
        if (hdrlen < 0)
                return hdrlen;

        dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd"
                " headerp 0x%p base 0x%p lkey 0x%x\n",
                __func__, transfertypes[req->rl_wtype], hdrlen, rpclen, padlen,
                headerp, base, req->rl_iov.lkey);

        /*
         * initialize send_iov's - normally only two: rdma chunk header and
         * single preregistered RPC header buffer, but if padding is present,
         * then use a preregistered (and zeroed) pad buffer between the RPC
         * header and any write data. In all non-rdma cases, any following
         * data has been copied into the RPC header buffer.
         */
        req->rl_send_iov[0].addr = req->rl_iov.addr;
        req->rl_send_iov[0].length = hdrlen;
        req->rl_send_iov[0].lkey = req->rl_iov.lkey;

        req->rl_send_iov[1].addr = req->rl_iov.addr + (base - req->rl_base);
        req->rl_send_iov[1].length = rpclen;
        req->rl_send_iov[1].lkey = req->rl_iov.lkey;

        req->rl_niovs = 2;

        if (padlen) {
                struct rpcrdma_ep *ep = &r_xprt->rx_ep;

                req->rl_send_iov[2].addr = ep->rep_pad.addr;
                req->rl_send_iov[2].length = padlen;
                req->rl_send_iov[2].lkey = ep->rep_pad.lkey;

                req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
                req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
                req->rl_send_iov[3].lkey = req->rl_iov.lkey;

                req->rl_niovs = 4;
        }

        return 0;
}

/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
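 *
 * Returns the total number of bytes the server claims to have
 * written, or -1 if the chunk list is malformed or overruns the
 * received buffer; on success, *iptrp is advanced past the list.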
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk,
                     __be32 **iptrp)
{
        unsigned int i, total_len;
        struct rpcrdma_write_chunk *cur_wchunk;

        i = ntohl(**iptrp);     /* get array count */
        if (i > max)
                return -1;
        cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
        total_len = 0;
        while (i--) {
                struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
                ifdebug(FACILITY) {
                        u64 off;
                        xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
                        dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
                                __func__,
                                ntohl(seg->rs_length),
                                (unsigned long long)off,
                                ntohl(seg->rs_handle));
                }
                total_len += ntohl(seg->rs_length);
                ++cur_wchunk;
        }
        /* check and adjust for properly terminated write chunk */
        if (wrchunk) {
                __be32 *w = (__be32 *) cur_wchunk;
                if (*w++ != xdr_zero)
                        return -1;
                cur_wchunk = (struct rpcrdma_write_chunk *) w;
        }
        if ((char *) cur_wchunk > rep->rr_base + rep->rr_len)
                return -1;

        *iptrp = (__be32 *) cur_wchunk;
        return total_len;
}

/*
 * Scatter inline received data back into provided iov's.
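 *
 * The head is filled first, then any page data, then the tail; a
 * nonzero "pad" argument appends that many zero bytes of implicit
 * XDR padding to the tail.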
 */
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
        int i, npages, curlen, olen;
        char *destp;
        struct page **ppages;
        int page_base;

        curlen = rqst->rq_rcv_buf.head[0].iov_len;
        if (curlen > copy_len) {        /* write chunk header fixup */
                curlen = copy_len;
                rqst->rq_rcv_buf.head[0].iov_len = curlen;
        }

        dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
                __func__, srcp, copy_len, curlen);

        /* Shift pointer for first receive segment only */
        rqst->rq_rcv_buf.head[0].iov_base = srcp;
        srcp += curlen;
        copy_len -= curlen;

        olen = copy_len;
        i = 0;
        rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
        page_base = rqst->rq_rcv_buf.page_base;
        ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
        page_base &= ~PAGE_MASK;

        if (copy_len && rqst->rq_rcv_buf.page_len) {
                npages = PAGE_ALIGN(page_base +
                        rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
                for (; i < npages; i++) {
                        curlen = PAGE_SIZE - page_base;
                        if (curlen > copy_len)
                                curlen = copy_len;
                        dprintk("RPC: %s: page %d"
                                " srcp 0x%p len %d curlen %d\n",
                                __func__, i, srcp, copy_len, curlen);
                        destp = kmap_atomic(ppages[i]);
                        memcpy(destp + page_base, srcp, curlen);
                        flush_dcache_page(ppages[i]);
                        kunmap_atomic(destp);
                        srcp += curlen;
                        copy_len -= curlen;
                        if (copy_len == 0)
                                break;
                        page_base = 0;
                }
        }

        if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
                curlen = copy_len;
                if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
                        curlen = rqst->rq_rcv_buf.tail[0].iov_len;
                if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
                        memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
                dprintk("RPC: %s: tail srcp 0x%p len %d curlen %d\n",
                        __func__, srcp, copy_len, curlen);
                rqst->rq_rcv_buf.tail[0].iov_len = curlen;
                copy_len -= curlen;
                ++i;
        } else
                rqst->rq_rcv_buf.tail[0].iov_len = 0;

        if (pad) {
                /* implicit padding on terminal chunk */
                unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
                while (pad--)
                        p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
        }

        if (copy_len)
                dprintk("RPC: %s: %d bytes in"
                        " %d extra segments (%d lost)\n",
                        __func__, olen, i, copy_len);

        /* TBD avoid a warning from call_decode() */
        rqst->rq_private_buf = rqst->rq_rcv_buf;
}

void
rpcrdma_connect_worker(struct work_struct *work)
{
        struct rpcrdma_ep *ep =
                container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
        struct rpc_xprt *xprt = ep->rep_xprt;

        spin_lock_bh(&xprt->transport_lock);
        if (++xprt->connect_cookie == 0)        /* maintain a reserved value */
                ++xprt->connect_cookie;
        if (ep->rep_connected > 0) {
                if (!xprt_test_and_set_connected(xprt))
                        xprt_wake_pending_tasks(xprt, 0);
        } else {
                if (xprt_test_and_clear_connected(xprt))
                        xprt_wake_pending_tasks(xprt, -ENOTCONN);
        }
        spin_unlock_bh(&xprt->transport_lock);
}

/*
 * This function is called when an async event is posted to
 * the connection which changes the connection state. All it
 * does at this point is mark the connection up/down; the rpc
 * timers do the rest.
 */
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
        schedule_delayed_work(&ep->rep_connect_worker, 0);
}

/*
 * Called as a tasklet to do req/reply match and complete a request.
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
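 *
 * Replies of type RDMA_MSG carry the RPC message inline (possibly
 * followed by write chunks); replies of type RDMA_NOMSG deliver the
 * entire RPC message via a reply chunk, so no inline fixup is needed.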
 */
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
        struct rpcrdma_msg *headerp;
        struct rpcrdma_req *req;
        struct rpc_rqst *rqst;
        struct rpc_xprt *xprt = rep->rr_xprt;
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        __be32 *iptr;
        int rdmalen, status;
        unsigned long cwnd;

        /* Check status. If bad, signal disconnect and return rep to pool */
        if (rep->rr_len == ~0U) {
                rpcrdma_recv_buffer_put(rep);
                if (r_xprt->rx_ep.rep_connected == 1) {
                        r_xprt->rx_ep.rep_connected = -EIO;
                        rpcrdma_conn_func(&r_xprt->rx_ep);
                }
                return;
        }
        if (rep->rr_len < 28) {
                dprintk("RPC: %s: short/invalid reply\n", __func__);
                goto repost;
        }
        headerp = (struct rpcrdma_msg *) rep->rr_base;
        if (headerp->rm_vers != xdr_one) {
                dprintk("RPC: %s: invalid version %d\n",
                        __func__, ntohl(headerp->rm_vers));
                goto repost;
        }

        /* Get XID and try for a match. */
        spin_lock(&xprt->transport_lock);
        rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
        if (rqst == NULL) {
                spin_unlock(&xprt->transport_lock);
                dprintk("RPC: %s: reply 0x%p failed "
                        "to match any request xid 0x%08x len %d\n",
                        __func__, rep, headerp->rm_xid, rep->rr_len);
repost:
                r_xprt->rx_stats.bad_reply_count++;
                rep->rr_func = rpcrdma_reply_handler;
                if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
                        rpcrdma_recv_buffer_put(rep);

                return;
        }

        /* get request object */
        req = rpcr_to_rdmar(rqst);
        if (req->rl_reply) {
                spin_unlock(&xprt->transport_lock);
                dprintk("RPC: %s: duplicate reply 0x%p to RPC "
                        "request 0x%p: xid 0x%08x\n", __func__, rep, req,
                        headerp->rm_xid);
                goto repost;
        }

        dprintk("RPC: %s: reply 0x%p completes request 0x%p\n"
                " RPC request 0x%p xid 0x%08x\n",
                __func__, rep, req, rqst, headerp->rm_xid);

        /* from here on, the reply is no longer an orphan */
        req->rl_reply = rep;
        xprt->reestablish_timeout = 0;

        /* check for expected message types */
        /* The order of some of these tests is important. */
        switch (headerp->rm_type) {
        case htonl(RDMA_MSG):
                /* never expect read chunks */
                /* never expect reply chunks (two ways to check) */
                /* never expect write chunks without having offered RDMA */
                if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
                    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
                     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
                    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
                     req->rl_nchunks == 0))
                        goto badheader;
                if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
                        /* count any expected write chunks in read reply */
                        /* start at write chunk array count */
                        iptr = &headerp->rm_body.rm_chunks[2];
                        rdmalen = rpcrdma_count_chunks(rep,
                                                req->rl_nchunks, 1, &iptr);
                        /* check for validity, and no reply chunk after */
                        if (rdmalen < 0 || *iptr++ != xdr_zero)
                                goto badheader;
                        rep->rr_len -=
                            ((unsigned char *)iptr - (unsigned char *)headerp);
                        status = rep->rr_len + rdmalen;
                        r_xprt->rx_stats.total_rdma_reply += rdmalen;
                        /* special case - last chunk may omit padding */
                        if (rdmalen &= 3) {
                                rdmalen = 4 - rdmalen;
                                status += rdmalen;
                        }
                } else {
                        /* else ordinary inline */
                        rdmalen = 0;
                        iptr = (__be32 *)((unsigned char *)headerp + 28);
                        rep->rr_len -= 28;      /*sizeof *headerp;*/
                        status = rep->rr_len;
                }
                /* Fix up the rpc results for upper layer */
                rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
                break;

        case htonl(RDMA_NOMSG):
                /* never expect read or write chunks, always reply chunks */
                if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
                    headerp->rm_body.rm_chunks[1] != xdr_zero ||
                    headerp->rm_body.rm_chunks[2] != xdr_one ||
                    req->rl_nchunks == 0)
                        goto badheader;
                iptr = (__be32 *)((unsigned char *)headerp + 28);
                rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
                if (rdmalen < 0)
                        goto badheader;
                r_xprt->rx_stats.total_rdma_reply += rdmalen;
                /* Reply chunk buffer already is the reply vector - no fixup. */
                status = rdmalen;
                break;

badheader:
        default:
                dprintk("%s: invalid rpcrdma reply header (type %d):"
                        " chunks[012] == %d %d %d"
                        " expected chunks <= %d\n",
                        __func__, ntohl(headerp->rm_type),
                        headerp->rm_body.rm_chunks[0],
                        headerp->rm_body.rm_chunks[1],
                        headerp->rm_body.rm_chunks[2],
                        req->rl_nchunks);
                status = -EIO;
                r_xprt->rx_stats.bad_reply_count++;
                break;
        }

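        /* Reflect the server's most recent credit grant (cached in
         * rb_credits) into the RPC congestion window, possibly
         * releasing more queued requests.
         */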
        cwnd = xprt->cwnd;
        xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
        if (xprt->cwnd > cwnd)
                xprt_release_rqst_cong(rqst->rq_task);

        dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
                __func__, xprt, rqst, status);
        xprt_complete_rqst(rqst->rq_task, status);
        spin_unlock(&xprt->transport_lock);
}