xprtrdma: Update rkeys after transport reconnect
author     Chuck Lever <chuck.lever@oracle.com>
           Tue, 29 Jul 2014 21:23:43 +0000 (17:23 -0400)
committer  Anna Schumaker <Anna.Schumaker@Netapp.com>
           Thu, 31 Jul 2014 20:22:53 +0000 (16:22 -0400)
Various reports of:

  rpcrdma_qp_async_error_upcall: QP error 3 on device mlx4_0
        ep ffff8800bfd3e848

Ensure that rkeys in already-marshalled RPC/RDMA headers are
refreshed after the QP has been replaced by a reconnect. Otherwise
a retransmitted request advertises rkeys for FRMRs that were
invalidated and re-registered during connection recovery; the
server's RDMA Reads and Writes against those stale rkeys fail as
remote access errors, killing the freshly established QP as well.

BugLink: https://bugzilla.linux-nfs.org/show_bug.cgi?id=249
Suggested-by: Selvin Xavier <Selvin.Xavier@Emulex.Com>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Tested-by: Shirley Ma <shirley.ma@oracle.com>
Tested-by: Devesh Sharma <devesh.sharma@emulex.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtrdma/xprt_rdma.h

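Background for the failure mode, with an illustrative sketch (this is
not code from the patch): under FRMR registration, every reconnect
invalidates and re-registers the client's fast-registration MRs, and
each re-registration changes the MR's rkey via the generic verbs
helper ib_update_fast_reg_key(). A header marshalled before the
reconnect therefore carries rkeys the HCA no longer recognizes.

    #include <rdma/ib_verbs.h>

    /* Illustrative only: each FRMR (re)registration bumps the 8-bit
     * key portion of the MR's rkey, so any RPC/RDMA header marshalled
     * beforehand now carries a value the HCA will reject. */
    static u32 frmr_refresh_rkey_sketch(struct ib_mr *mr)
    {
            u8 key = (u8)(mr->rkey & 0x000000FF);

            ib_update_fast_reg_key(mr, ++key);
            return mr->rkey;        /* old headers are now stale */
    }
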
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 693966d3f33ba12c1220538ff58f632ae762562d..54422f73b03ba96971386f8a906ba57515194b2b 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
 # define RPCDBG_FACILITY       RPCDBG_TRANS
 #endif
 
-enum rpcrdma_chunktype {
-       rpcrdma_noch = 0,
-       rpcrdma_readch,
-       rpcrdma_areadch,
-       rpcrdma_writech,
-       rpcrdma_replych
-};
-
 #ifdef RPC_DEBUG
 static const char transfertypes[][12] = {
        "pure inline",  /* no chunks */
@@ -285,6 +277,28 @@ out:
        return n;
 }
 
+/*
+ * Marshal chunks. This routine returns the header length
+ * consumed by marshaling.
+ *
+ * Returns positive RPC/RDMA header size, or negative errno.
+ */
+
+ssize_t
+rpcrdma_marshal_chunks(struct rpc_rqst *rqst, ssize_t result)
+{
+       struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
+       struct rpcrdma_msg *headerp = (struct rpcrdma_msg *)req->rl_base;
+
+       if (req->rl_rtype != rpcrdma_noch)
+               result = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
+                                              headerp, req->rl_rtype);
+       else if (req->rl_wtype != rpcrdma_noch)
+               result = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
+                                              headerp, req->rl_wtype);
+       return result;
+}
+
 /*
  * Copy write data inline.
  * This function is used for "small" requests. Data which is passed
@@ -377,7 +391,6 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
        char *base;
        size_t rpclen, padlen;
        ssize_t hdrlen;
-       enum rpcrdma_chunktype rtype, wtype;
        struct rpcrdma_msg *headerp;
 
        /*
@@ -415,13 +428,13 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
         * into pages; otherwise use reply chunks.
         */
        if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
-               wtype = rpcrdma_noch;
+               req->rl_wtype = rpcrdma_noch;
        else if (rqst->rq_rcv_buf.page_len == 0)
-               wtype = rpcrdma_replych;
+               req->rl_wtype = rpcrdma_replych;
        else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
-               wtype = rpcrdma_writech;
+               req->rl_wtype = rpcrdma_writech;
        else
-               wtype = rpcrdma_replych;
+               req->rl_wtype = rpcrdma_replych;
 
        /*
         * Chunks needed for arguments?
@@ -438,16 +451,16 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
         * TBD check NFSv4 setacl
         */
        if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
-               rtype = rpcrdma_noch;
+               req->rl_rtype = rpcrdma_noch;
        else if (rqst->rq_snd_buf.page_len == 0)
-               rtype = rpcrdma_areadch;
+               req->rl_rtype = rpcrdma_areadch;
        else
-               rtype = rpcrdma_readch;
+               req->rl_rtype = rpcrdma_readch;
 
        /* The following simplification is not true forever */
-       if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
-               wtype = rpcrdma_noch;
-       if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
+       if (req->rl_rtype != rpcrdma_noch && req->rl_wtype == rpcrdma_replych)
+               req->rl_wtype = rpcrdma_noch;
+       if (req->rl_rtype != rpcrdma_noch && req->rl_wtype != rpcrdma_noch) {
                dprintk("RPC:       %s: cannot marshal multiple chunk lists\n",
                        __func__);
                return -EIO;
@@ -461,7 +474,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
         * When padding is in use and applies to the transfer, insert
         * it and change the message type.
         */
-       if (rtype == rpcrdma_noch) {
+       if (req->rl_rtype == rpcrdma_noch) {
 
                padlen = rpcrdma_inline_pullup(rqst,
                                                RPCRDMA_INLINE_PAD_VALUE(rqst));
@@ -476,7 +489,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
                        headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
                        headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
                        hdrlen += 2 * sizeof(u32); /* extra words in padhdr */
-                       if (wtype != rpcrdma_noch) {
+                       if (req->rl_wtype != rpcrdma_noch) {
                                dprintk("RPC:       %s: invalid chunk list\n",
                                        __func__);
                                return -EIO;
@@ -497,30 +510,18 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
                         * on receive. Therefore, we request a reply chunk
                         * for non-writes wherever feasible and efficient.
                         */
-                       if (wtype == rpcrdma_noch)
-                               wtype = rpcrdma_replych;
+                       if (req->rl_wtype == rpcrdma_noch)
+                               req->rl_wtype = rpcrdma_replych;
                }
        }
 
-       /*
-        * Marshal chunks. This routine will return the header length
-        * consumed by marshaling.
-        */
-       if (rtype != rpcrdma_noch) {
-               hdrlen = rpcrdma_create_chunks(rqst,
-                                       &rqst->rq_snd_buf, headerp, rtype);
-               wtype = rtype;  /* simplify dprintk */
-
-       } else if (wtype != rpcrdma_noch) {
-               hdrlen = rpcrdma_create_chunks(rqst,
-                                       &rqst->rq_rcv_buf, headerp, wtype);
-       }
+       hdrlen = rpcrdma_marshal_chunks(rqst, hdrlen);
        if (hdrlen < 0)
                return hdrlen;
 
        dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd padlen %zd"
                " headerp 0x%p base 0x%p lkey 0x%x\n",
-               __func__, transfertypes[wtype], hdrlen, rpclen, padlen,
+               __func__, transfertypes[req->rl_wtype], hdrlen, rpclen, padlen,
                headerp, base, req->rl_iov.lkey);
 
        /*
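
The extraction above is the heart of the rpc_rdma.c change: the
chunk-list encoding that used to be inlined in rpcrdma_marshal_req()
now has a name, so the transport can invoke it on its own during a
retransmit. A usage sketch (passing 0 as the running result means a
request with no chunks yields a header length of 0):

    /* Re-encode only the chunk lists of an already-built header;
     * rpcrdma_create_chunks() writes the current rkeys into it. */
    ssize_t hdrlen = rpcrdma_marshal_chunks(rqst, 0);
    if (hdrlen < 0)
            return hdrlen;          /* e.g. -EIO from marshaling */
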
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 41851020291978f095cdb1d4725b476b348e8374..f6d280b31dc9018fe4da963d0f9a9fd7ed9aa8d3 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -597,13 +597,14 @@ xprt_rdma_send_request(struct rpc_task *task)
        struct rpc_xprt *xprt = rqst->rq_xprt;
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
-       int rc;
+       int rc = 0;
 
-       if (req->rl_niovs == 0) {
+       if (req->rl_niovs == 0)
                rc = rpcrdma_marshal_req(rqst);
-               if (rc < 0)
-                       goto failed_marshal;
-       }
+       else if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR)
+               rc = rpcrdma_marshal_chunks(rqst, 0);
+       if (rc < 0)
+               goto failed_marshal;
 
        if (req->rl_reply == NULL)              /* e.g. reconnection */
                rpcrdma_recv_buffer_get(req);
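
Condensed, the send path after this patch reads as below (a sketch
assembled from the hunk above; the error labels and the rest of the
send logic are elided). Only FRMR takes the re-marshal branch,
presumably because the other registration strategies keep their rkeys
across a reconnect:

    static int xprt_rdma_send_request_condensed(struct rpc_task *task)
    {
            struct rpc_rqst *rqst = task->tk_rqstp;
            struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
            struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
            int rc = 0;

            if (req->rl_niovs == 0)         /* first transmission */
                    rc = rpcrdma_marshal_req(rqst);
            else if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR)
                    rc = rpcrdma_marshal_chunks(rqst, 0); /* refresh rkeys */
            if (rc < 0)
                    return rc;      /* the real code jumps to failed_marshal */
            /* ... post the send as before ... */
            return 0;
    }
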
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index f3d86b24a4af3cd198107c3fcbd761bc0258f610..c270e59cf9171e2f52497ed8370292256816f0f6 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -99,6 +99,14 @@ struct rpcrdma_ep {
 #define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
 #define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
 
+enum rpcrdma_chunktype {
+       rpcrdma_noch = 0,
+       rpcrdma_readch,
+       rpcrdma_areadch,
+       rpcrdma_writech,
+       rpcrdma_replych
+};
+
 /*
  * struct rpcrdma_rep -- this structure encapsulates state required to recv
  * and complete a reply, asychronously. It needs several pieces of
@@ -192,6 +200,7 @@ struct rpcrdma_req {
        unsigned int    rl_niovs;       /* 0, 2 or 4 */
        unsigned int    rl_nchunks;     /* non-zero if chunks */
        unsigned int    rl_connect_cookie;      /* retry detection */
+       enum rpcrdma_chunktype  rl_rtype, rl_wtype;
        struct rpcrdma_buffer *rl_buffer; /* home base for this structure */
        struct rpcrdma_rep      *rl_reply;/* holder for reply buffer */
        struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];/* chunk segments */
@@ -347,6 +356,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *);
 /*
  * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
  */
+ssize_t rpcrdma_marshal_chunks(struct rpc_rqst *, ssize_t);
 int rpcrdma_marshal_req(struct rpc_rqst *);
 size_t rpcrdma_max_payload(struct rpcrdma_xprt *);
 