svcrdma: Clean up read chunk counting
net/sunrpc/xprtrdma/svc_rdma_sendto.c
/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY RPCDBG_SVCXPRT

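/* Build the kvec array in 'vec' that describes the RPC reply held in
 * the xdr_buf: one entry for the head, one per pagelist page, and one
 * for the tail (if present). Slot 0 is skipped; it is reserved for the
 * RPC-over-RDMA header, which send_reply() maps separately, and
 * vec->count includes that reserved slot.
 */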
static int map_xdr(struct svcxprt_rdma *xprt,
                   struct xdr_buf *xdr,
                   struct svc_rdma_req_map *vec)
{
        int sge_no;
        u32 sge_bytes;
        u32 page_bytes;
        u32 page_off;
        int page_no;

        BUG_ON(xdr->len !=
               (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len));

        /* Skip the first sge, this is for the RPCRDMA header */
        sge_no = 1;

        /* Head SGE */
        vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
        vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
        sge_no++;

        /* pages SGE */
        page_no = 0;
        page_bytes = xdr->page_len;
        page_off = xdr->page_base;
        while (page_bytes) {
                vec->sge[sge_no].iov_base =
                        page_address(xdr->pages[page_no]) + page_off;
                sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
                page_bytes -= sge_bytes;
                vec->sge[sge_no].iov_len = sge_bytes;

                sge_no++;
                page_no++;
                page_off = 0; /* reset for next time through loop */
        }

        /* Tail SGE */
        if (xdr->tail[0].iov_len) {
                vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
                vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
                sge_no++;
        }

        dprintk("svcrdma: map_xdr: sge_no %d page_no %d "
                "page_base %u page_len %u head_len %zu tail_len %zu\n",
                sge_no, page_no, xdr->page_base, xdr->page_len,
                xdr->head[0].iov_len, xdr->tail[0].iov_len);

        vec->count = sge_no;
        return 0;
}

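/* DMA-map part of the reply XDR buffer for a send or RDMA Write.
 * xdr_off is a byte offset into the xdr_buf as a whole; it is first
 * resolved to the page backing the head, the pagelist, or the tail,
 * then at most PAGE_SIZE bytes of 'len' are mapped starting there.
 */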
static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
                              struct xdr_buf *xdr,
                              u32 xdr_off, size_t len, int dir)
{
        struct page *page;
        dma_addr_t dma_addr;
        if (xdr_off < xdr->head[0].iov_len) {
                /* This offset is in the head */
                xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
                page = virt_to_page(xdr->head[0].iov_base);
        } else {
                xdr_off -= xdr->head[0].iov_len;
                if (xdr_off < xdr->page_len) {
                        /* This offset is in the page list */
                        xdr_off += xdr->page_base;
                        page = xdr->pages[xdr_off >> PAGE_SHIFT];
                        xdr_off &= ~PAGE_MASK;
                } else {
                        /* This offset is in the tail */
                        xdr_off -= xdr->page_len;
                        xdr_off += (unsigned long)
                                xdr->tail[0].iov_base & ~PAGE_MASK;
                        page = virt_to_page(xdr->tail[0].iov_base);
                }
        }
        dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
                                   min_t(size_t, PAGE_SIZE, len), dir);
        return dma_addr;
}

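/* Post one RDMA Write WR that pushes a portion of the already-mapped
 * reply (vec) into the client memory region described by rmr/to.
 * Returns the number of bytes covered by the posted WR (which can be
 * less than write_len when sc_max_sge is exhausted), or -EIO on a
 * DMA mapping or posting failure.
 */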
/* Assumptions:
 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
                      u32 rmr, u64 to,
                      u32 xdr_off, int write_len,
                      struct svc_rdma_req_map *vec)
{
        struct ib_send_wr write_wr;
        struct ib_sge *sge;
        int xdr_sge_no;
        int sge_no;
        int sge_bytes;
        int sge_off;
        int bc;
        struct svc_rdma_op_ctxt *ctxt;

        BUG_ON(vec->count > RPCSVC_MAXPAGES);
        dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
                "write_len=%d, vec->sge=%p, vec->count=%lu\n",
                rmr, (unsigned long long)to, xdr_off,
                write_len, vec->sge, vec->count);

        ctxt = svc_rdma_get_context(xprt);
        ctxt->direction = DMA_TO_DEVICE;
        sge = ctxt->sge;

        /* Find the SGE associated with xdr_off */
        for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
             xdr_sge_no++) {
                if (vec->sge[xdr_sge_no].iov_len > bc)
                        break;
                bc -= vec->sge[xdr_sge_no].iov_len;
        }

        sge_off = bc;
        bc = write_len;
        sge_no = 0;

        /* Copy the remaining SGE */
        while (bc != 0) {
                sge_bytes = min_t(size_t,
                                  bc, vec->sge[xdr_sge_no].iov_len - sge_off);
                sge[sge_no].length = sge_bytes;
                sge[sge_no].addr =
                        dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
                                    sge_bytes, DMA_TO_DEVICE);
                xdr_off += sge_bytes;
                if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                         sge[sge_no].addr))
                        goto err;
                atomic_inc(&xprt->sc_dma_used);
                sge[sge_no].lkey = xprt->sc_dma_lkey;
                ctxt->count++;
                sge_off = 0;
                sge_no++;
                xdr_sge_no++;
                BUG_ON(xdr_sge_no > vec->count);
                bc -= sge_bytes;
                if (sge_no == xprt->sc_max_sge)
                        break;
        }

        /* Prepare WRITE WR */
        memset(&write_wr, 0, sizeof write_wr);
        ctxt->wr_op = IB_WR_RDMA_WRITE;
        write_wr.wr_id = (unsigned long)ctxt;
        write_wr.sg_list = &sge[0];
        write_wr.num_sge = sge_no;
        write_wr.opcode = IB_WR_RDMA_WRITE;
        write_wr.send_flags = IB_SEND_SIGNALED;
        write_wr.wr.rdma.rkey = rmr;
        write_wr.wr.rdma.remote_addr = to;

        /* Post It */
        atomic_inc(&rdma_stat_write);
        if (svc_rdma_send(xprt, &write_wr))
                goto err;
        return write_len - bc;
 err:
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_context(ctxt, 0);
        /* Fatal error, close transport */
        return -EIO;
}

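/* Send the pagelist and tail of the reply via RDMA Write, as directed
 * by the write chunk list in the request. The reply's write list is
 * encoded with the chunks and lengths actually used. Returns 0 if the
 * request carried no write list, the pagelist+tail byte count on
 * success, or -EIO on error.
 */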
static int send_write_chunks(struct svcxprt_rdma *xprt,
                             struct rpcrdma_msg *rdma_argp,
                             struct rpcrdma_msg *rdma_resp,
                             struct svc_rqst *rqstp,
                             struct svc_rdma_req_map *vec)
{
        u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
        int write_len;
        u32 xdr_off;
        int chunk_off;
        int chunk_no;
        struct rpcrdma_write_array *arg_ary;
        struct rpcrdma_write_array *res_ary;
        int ret;

        arg_ary = svc_rdma_get_write_array(rdma_argp);
        if (!arg_ary)
                return 0;
        res_ary = (struct rpcrdma_write_array *)
                &rdma_resp->rm_body.rm_chunks[1];

        /* Write chunks start at the pagelist */
        for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
             xfer_len && chunk_no < ntohl(arg_ary->wc_nchunks);
             chunk_no++) {
                struct rpcrdma_segment *arg_ch;
                u64 rs_offset;

                arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
                write_len = min(xfer_len, ntohl(arg_ch->rs_length));

                /* Prepare the response chunk given the length actually
                 * written */
                xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
                svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
                                                arg_ch->rs_handle,
                                                arg_ch->rs_offset,
                                                write_len);
                chunk_off = 0;
                while (write_len) {
                        ret = send_write(xprt, rqstp,
                                         ntohl(arg_ch->rs_handle),
                                         rs_offset + chunk_off,
                                         xdr_off,
                                         write_len,
                                         vec);
                        if (ret <= 0) {
                                dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
                                        ret);
                                return -EIO;
                        }
                        chunk_off += ret;
                        xdr_off += ret;
                        xfer_len -= ret;
                        write_len -= ret;
                }
        }
        /* Update the req with the number of chunks actually used */
        svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

        return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
}

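/* Send the entire reply message via RDMA Write, as directed by the
 * reply chunk in the request (used when the reply is too large to be
 * sent inline). The reply array in the response header is encoded with
 * the segments actually used. Returns 0 if the request carried no
 * reply array, the reply length on success, or -EIO on error.
 */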
static int send_reply_chunks(struct svcxprt_rdma *xprt,
                             struct rpcrdma_msg *rdma_argp,
                             struct rpcrdma_msg *rdma_resp,
                             struct svc_rqst *rqstp,
                             struct svc_rdma_req_map *vec)
{
        u32 xfer_len = rqstp->rq_res.len;
        int write_len;
        u32 xdr_off;
        int chunk_no;
        int chunk_off;
        int nchunks;
        struct rpcrdma_segment *ch;
        struct rpcrdma_write_array *arg_ary;
        struct rpcrdma_write_array *res_ary;
        int ret;

        arg_ary = svc_rdma_get_reply_array(rdma_argp);
        if (!arg_ary)
                return 0;
        /* XXX: need to fix when reply lists occur with read-list and/or
         * write-list */
        res_ary = (struct rpcrdma_write_array *)
                &rdma_resp->rm_body.rm_chunks[2];

        /* xdr offset starts at RPC message */
        nchunks = ntohl(arg_ary->wc_nchunks);
        for (xdr_off = 0, chunk_no = 0;
             xfer_len && chunk_no < nchunks;
             chunk_no++) {
                u64 rs_offset;
                ch = &arg_ary->wc_array[chunk_no].wc_target;
                write_len = min(xfer_len, ntohl(ch->rs_length));

                /* Prepare the reply chunk given the length actually
                 * written */
                xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
                svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
                                                ch->rs_handle, ch->rs_offset,
                                                write_len);
                chunk_off = 0;
                while (write_len) {
                        ret = send_write(xprt, rqstp,
                                         ntohl(ch->rs_handle),
                                         rs_offset + chunk_off,
                                         xdr_off,
                                         write_len,
                                         vec);
                        if (ret <= 0) {
                                dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
                                        ret);
                                return -EIO;
                        }
                        chunk_off += ret;
                        xdr_off += ret;
                        xfer_len -= ret;
                        write_len -= ret;
                }
        }
        /* Update the req with the number of chunks actually used */
        svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

        return rqstp->rq_res.len;
}

/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0]; the message type has already been encoded in that header by
 * the caller, and the 'byte_count' argument indicates how much of the
 * XDR to include in this RDMA_SEND. NB: The offset of the payload to
 * send is zero in the XDR.
 */
static int send_reply(struct svcxprt_rdma *rdma,
                      struct svc_rqst *rqstp,
                      struct page *page,
                      struct rpcrdma_msg *rdma_resp,
                      struct svc_rdma_op_ctxt *ctxt,
                      struct svc_rdma_req_map *vec,
                      int byte_count)
{
        struct ib_send_wr send_wr;
        int sge_no;
        int sge_bytes;
        int page_no;
        int pages;
        int ret;

        /* Post a recv buffer to handle another request. */
        ret = svc_rdma_post_recv(rdma);
        if (ret) {
                printk(KERN_INFO
                       "svcrdma: could not post a receive buffer, err=%d. "
                       "Closing transport %p.\n", ret, rdma);
                set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
                svc_rdma_put_context(ctxt, 0);
                return -ENOTCONN;
        }

        /* Prepare the context */
        ctxt->pages[0] = page;
        ctxt->count = 1;

        /* Prepare the SGE for the RPCRDMA Header */
        ctxt->sge[0].lkey = rdma->sc_dma_lkey;
        ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
        ctxt->sge[0].addr =
                ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
                                ctxt->sge[0].length, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
                goto err;
        atomic_inc(&rdma->sc_dma_used);

        ctxt->direction = DMA_TO_DEVICE;

        /* Map the payload indicated by 'byte_count' */
        for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
                int xdr_off = 0;
                sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
                byte_count -= sge_bytes;
                ctxt->sge[sge_no].addr =
                        dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
                                    sge_bytes, DMA_TO_DEVICE);
                xdr_off += sge_bytes;
                if (ib_dma_mapping_error(rdma->sc_cm_id->device,
                                         ctxt->sge[sge_no].addr))
                        goto err;
                atomic_inc(&rdma->sc_dma_used);
                ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey;
                ctxt->sge[sge_no].length = sge_bytes;
        }
        BUG_ON(byte_count != 0);

        /* Save all respages in the ctxt and remove them from the
         * respages array. They are our pages until the I/O
         * completes.
         */
        pages = rqstp->rq_next_page - rqstp->rq_respages;
        for (page_no = 0; page_no < pages; page_no++) {
                ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
                ctxt->count++;
                rqstp->rq_respages[page_no] = NULL;
                /*
                 * If there are more pages than SGE, terminate SGE
                 * list so that svc_rdma_unmap_dma doesn't attempt to
                 * unmap garbage.
                 */
                if (page_no+1 >= sge_no)
                        ctxt->sge[page_no+1].length = 0;
        }
        rqstp->rq_next_page = rqstp->rq_respages + 1;

        BUG_ON(sge_no > rdma->sc_max_sge);
        memset(&send_wr, 0, sizeof send_wr);
        ctxt->wr_op = IB_WR_SEND;
        send_wr.wr_id = (unsigned long)ctxt;
        send_wr.sg_list = ctxt->sge;
        send_wr.num_sge = sge_no;
        send_wr.opcode = IB_WR_SEND;
        send_wr.send_flags = IB_SEND_SIGNALED;

        ret = svc_rdma_send(rdma, &send_wr);
        if (ret)
                goto err;

        return 0;

 err:
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_context(ctxt, 1);
        return -EIO;
}

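/* No transport header needs to be reserved in rq_res for RPC-over-RDMA:
 * the header is built in its own page by svc_rdma_sendto(), so this
 * xpo_prep_reply_hdr method is intentionally empty.
 */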
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

/*
 * Return the start of an xdr buffer.
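 * (When called on rq_arg this recovers the RPC-over-RDMA transport
 * header: the receive path advances head[0].iov_base past that header
 * while xdr->len still counts its bytes, so the difference computed
 * below equals the transport header length.)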
 */
static void *xdr_start(struct xdr_buf *xdr)
{
        return xdr->head[0].iov_base -
                (xdr->len -
                 xdr->page_len -
                 xdr->tail[0].iov_len -
                 xdr->head[0].iov_len);
}

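/* Send the RPC reply in rqstp->rq_res back to the client: map the
 * reply into a request vector, push write-chunk and reply-chunk data
 * with RDMA Writes, then transmit the RPC-over-RDMA header plus any
 * remaining inline bytes with a single RDMA Send.
 */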
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        struct rpcrdma_msg *rdma_argp;
        struct rpcrdma_msg *rdma_resp;
        struct rpcrdma_write_array *reply_ary;
        enum rpcrdma_proc reply_type;
        int ret;
        int inline_bytes;
        struct page *res_page;
        struct svc_rdma_op_ctxt *ctxt;
        struct svc_rdma_req_map *vec;

        dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

        /* Get the RDMA request header. */
        rdma_argp = xdr_start(&rqstp->rq_arg);

        /* Build a req vec for the XDR */
        ctxt = svc_rdma_get_context(rdma);
        ctxt->direction = DMA_TO_DEVICE;
        vec = svc_rdma_get_req_map();
        ret = map_xdr(rdma, &rqstp->rq_res, vec);
        if (ret)
                goto err0;
        inline_bytes = rqstp->rq_res.len;

        /* Create the RDMA response header */
        res_page = svc_rdma_get_page();
        rdma_resp = page_address(res_page);
        reply_ary = svc_rdma_get_reply_array(rdma_argp);
        if (reply_ary)
                reply_type = RDMA_NOMSG;
        else
                reply_type = RDMA_MSG;
        svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
                                         rdma_resp, reply_type);

        /* Send any write-chunk data and build resp write-list */
        ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
                                rqstp, vec);
        if (ret < 0) {
                printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
                       ret);
                goto err1;
        }
        inline_bytes -= ret;

        /* Send any reply-list data and update resp reply-list */
        ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
                                rqstp, vec);
        if (ret < 0) {
                printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
                       ret);
                goto err1;
        }
        inline_bytes -= ret;

        ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
                         inline_bytes);
        svc_rdma_put_req_map(vec);
        dprintk("svcrdma: send_reply returns %d\n", ret);
        return ret;

 err1:
        put_page(res_page);
 err0:
        svc_rdma_put_req_map(vec);
        svc_rdma_put_context(ctxt, 0);
        return ret;
}