xprtrdma: Chain together all MWs in same buffer pool
authorChuck Lever <chuck.lever@oracle.com>
Tue, 29 Jul 2014 21:24:28 +0000 (17:24 -0400)
committerAnna Schumaker <Anna.Schumaker@Netapp.com>
Thu, 31 Jul 2014 20:22:54 +0000 (16:22 -0400)
During connection loss recovery, we need to visit every MW in a
buffer pool. Any MW that is in use by an RPC will not be on the
rb_mws list, so a second all-inclusive list is required.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Tested-by: Shirley Ma <shirley.ma@oracle.com>
Tested-by: Devesh Sharma <devesh.sharma@emulex.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h

index b670f4d92840f87f2054a891f84ae41b95f8917b..0ad7d10f13a7fb1264e39b4e81d55647c24c46a9 100644 (file)
@@ -1074,6 +1074,7 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
        p += cdata->padding;
 
        INIT_LIST_HEAD(&buf->rb_mws);
+       INIT_LIST_HEAD(&buf->rb_all);
        r = (struct rpcrdma_mw *)p;
        switch (ia->ri_memreg_strategy) {
        case RPCRDMA_FRMR:
@@ -1098,6 +1099,7 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
                                ib_dereg_mr(r->r.frmr.fr_mr);
                                goto out;
                        }
+                       list_add(&r->mw_all, &buf->rb_all);
                        list_add(&r->mw_list, &buf->rb_mws);
                        ++r;
                }
@@ -1116,6 +1118,7 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
                                        " failed %i\n", __func__, rc);
                                goto out;
                        }
+                       list_add(&r->mw_all, &buf->rb_all);
                        list_add(&r->mw_list, &buf->rb_mws);
                        ++r;
                }
@@ -1225,6 +1228,7 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
        while (!list_empty(&buf->rb_mws)) {
                r = list_entry(buf->rb_mws.next,
                        struct rpcrdma_mw, mw_list);
+               list_del(&r->mw_all);
                list_del(&r->mw_list);
                switch (ia->ri_memreg_strategy) {
                case RPCRDMA_FRMR:
index 84c3455a95216057155ae7634cc5bef2b6421af8..c1d865287b0ea6132dceed94946b5eca3f809207 100644 (file)
@@ -151,7 +151,7 @@ struct rpcrdma_rep {
  * An external memory region is any buffer or page that is registered
  * on the fly (ie, not pre-registered).
  *
- * Each rpcrdma_buffer has a list of these anchored in rb_mws. During
+ * Each rpcrdma_buffer has a list of free MWs anchored in rb_mws. During
  * call_allocate, rpcrdma_buffer_get() assigns one to each segment in
  * an rpcrdma_req. Then rpcrdma_register_external() grabs these to keep
  * track of registration metadata while each RPC is pending.
@@ -175,6 +175,7 @@ struct rpcrdma_mw {
                struct rpcrdma_frmr     frmr;
        } r;
        struct list_head        mw_list;
+       struct list_head        mw_all;
 };
 
 /*
@@ -246,6 +247,7 @@ struct rpcrdma_buffer {
        atomic_t        rb_credits;     /* most recent server credits */
        int             rb_max_requests;/* client max requests */
        struct list_head rb_mws;        /* optional memory windows/fmrs/frmrs */
+       struct list_head rb_all;
        int             rb_send_index;
        struct rpcrdma_req      **rb_send_bufs;
        int             rb_recv_index;
This page took 0.026143 seconds and 5 git commands to generate.