/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */
/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR). Also referred to sometimes as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */
/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */
/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * three states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *		(Or, the LOCAL_INV WR has not completed or flushed yet).
 *
 * STALE:	The MR was being registered or unregistered when the QP
 *		entered ERROR state, and the pending WR was flushed.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * When frwr_op_map encounters STALE and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
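
/* Summary of the MR state transitions described above:
 *
 *	INVALID --(FAST_REG posted by frwr_op_map)--------> VALID
 *	VALID   --(LOCAL_INV posted by frwr_op_unmap)-----> INVALID
 *	any     --(pending WR flushed by QP error)--------> STALE
 *	STALE or VALID --(deferred dereg + realloc)-------> INVALID
 */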

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

static struct workqueue_struct *frwr_recovery_wq;

#define FRWR_RECOVERY_WQ_FLAGS		(WQ_UNBOUND | WQ_MEM_RECLAIM)
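
/* The recovery worker sleeps in ib_dereg_mr/ib_alloc_mr, so it cannot
 * run in the completion upcall; WQ_MEM_RECLAIM and WQ_UNBOUND are
 * presumably chosen so that recovery still makes progress under memory
 * pressure.
 */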

int
frwr_alloc_recovery_wq(void)
{
	frwr_recovery_wq = alloc_workqueue("frwr_recovery",
					   FRWR_RECOVERY_WQ_FLAGS, 0);
	return !frwr_recovery_wq ? -ENOMEM : 0;
}

void
frwr_destroy_recovery_wq(void)
{
	struct workqueue_struct *wq;

	if (!frwr_recovery_wq)
		return;

	wq = frwr_recovery_wq;
	frwr_recovery_wq = NULL;
	destroy_workqueue(wq);
}

/* Deferred reset of a single FRMR. Generate a fresh rkey by
 * replacing the MR.
 *
 * There's no recovery if this fails. The FRMR is abandoned, but
 * remains in rb_all. It will be cleaned up when the transport is
 * destroyed.
 */
static void
__frwr_recovery_worker(struct work_struct *work)
{
	struct rpcrdma_mw *r = container_of(work, struct rpcrdma_mw,
					    r.frmr.fr_work);
	struct rpcrdma_xprt *r_xprt = r->r.frmr.fr_xprt;
	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;

	if (ib_dereg_mr(r->r.frmr.fr_mr))
		goto out_fail;

	r->r.frmr.fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
	if (IS_ERR(r->r.frmr.fr_mr))
		goto out_fail;

	dprintk("RPC: %s: recovered FRMR %p\n", __func__, r);
	r->r.frmr.fr_state = FRMR_IS_INVALID;
	rpcrdma_put_mw(r_xprt, r);
	return;

out_fail:
	pr_warn("RPC: %s: FRMR %p unrecovered\n",
		__func__, r);
}

/* A broken MR was discovered in a context that can't sleep.
 * Defer recovery to the recovery worker.
 */
static void
__frwr_queue_recovery(struct rpcrdma_mw *r)
{
	INIT_WORK(&r->r.frmr.fr_work, __frwr_recovery_worker);
	queue_work(frwr_recovery_wq, &r->r.frmr.fr_work);
}

static int
__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
	    unsigned int depth)
{
	struct rpcrdma_frmr *f = &r->r.frmr;
	int rc;

	f->fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
	if (IS_ERR(f->fr_mr))
		goto out_mr_err;

	f->sg = kcalloc(depth, sizeof(*f->sg), GFP_KERNEL);
	if (!f->sg)
		goto out_list_err;

	sg_init_table(f->sg, depth);

	return 0;

out_mr_err:
	rc = PTR_ERR(f->fr_mr);
	dprintk("RPC: %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC: %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(f->fr_mr);
	return rc;
}

static void
__frwr_release(struct rpcrdma_mw *r)
{
	int rc;

	rc = ib_dereg_mr(r->r.frmr.fr_mr);
	if (rc)
		dprintk("RPC: %s: ib_dereg_mr status %i\n",
			__func__, rc);
	kfree(r->r.frmr.sg);
}

static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	int depth, delta;

	ia->ri_max_frmr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      ia->ri_device->attrs.max_fast_reg_page_list_len);
	dprintk("RPC: %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frmr_depth);

	/* Add room for frmr register and invalidate WRs.
	 * 1. FRMR reg WR for head
	 * 2. FRMR invalidate WR for head
	 * 3. N FRMR reg WRs for pagelist
	 * 4. N FRMR invalidate WRs for pagelist
	 * 5. FRMR reg WR for tail
	 * 6. FRMR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRMR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
		do {
			depth += 2; /* FRMR reg + invalidate */
			delta -= ia->ri_max_frmr_depth;
		} while (delta > 0);
	}

	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > ia->ri_device->attrs.max_qp_wr) {
		cdata->max_requests = ia->ri_device->attrs.max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	return 0;
}

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth);
}

/* If FAST_REG or LOCAL_INV failed, indicate the frmr needs
 * to be reset.
 *
 * WARNING: Only wr_id and status are reliable at this point
 */
static void
__frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_mw *r)
{
	if (likely(wc->status == IB_WC_SUCCESS))
		return;

	/* WARNING: Only wr_id and status are reliable at this point */
	r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
	if (wc->status == IB_WC_WR_FLUSH_ERR)
		dprintk("RPC: %s: frmr %p flushed\n", __func__, r);
	else
		pr_warn("RPC: %s: frmr %p error, status %s (%d)\n",
			__func__, r, ib_wc_status_msg(wc->status), wc->status);
	r->r.frmr.fr_state = FRMR_IS_STALE;
}
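
/* Send completion handler shared by FAST_REG and LOCAL_INV WRs.
 * fr_linv_done is completed only for an MR whose fr_waiter flag is
 * set, which frwr_op_unmap_sync does for the last MR in its chain of
 * LOCAL_INV WRs.
 */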
static void
frwr_sendcompletion(struct ib_wc *wc)
{
	struct rpcrdma_mw *r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
	struct rpcrdma_frmr *f = &r->r.frmr;

	if (unlikely(wc->status != IB_WC_SUCCESS))
		__frwr_sendcompletion_flush(wc, r);

	if (f->fr_waiter)
		complete(&f->fr_linv_done);
}

static int
frwr_op_init(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct ib_device *device = r_xprt->rx_ia.ri_device;
	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
	int i;

	spin_lock_init(&buf->rb_mwlock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);

	i = max_t(int, RPCRDMA_MAX_DATA_SEGS / depth, 1);
	i += 2;				/* head + tail */
	i *= buf->rb_max_requests;	/* one set for each RPC slot */
	dprintk("RPC: %s: initializing %d FRMRs\n", __func__, i);
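
	/* Illustrative sizing (assumed numbers, not from this file):
	 * with a device depth of 16 and RPCRDMA_MAX_DATA_SEGS of 64,
	 * each RPC slot gets 64 / 16 = 4 pagelist FRMRs plus head and
	 * tail, for a total of 6 FRMRs per slot.
	 */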

	while (i--) {
		struct rpcrdma_mw *r;
		int rc;

		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			return -ENOMEM;

		rc = __frwr_init(r, pd, device, depth);
		if (rc) {
			kfree(r);
			return rc;
		}

		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
		r->mw_sendcompletion = frwr_sendcompletion;
		r->r.frmr.fr_xprt = r_xprt;
	}

	return 0;
}

/* Post a FAST_REG Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct ib_device *device = ia->ri_device;
	enum dma_data_direction direction = rpcrdma_data_dir(writing);
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_mw *mw;
	struct rpcrdma_frmr *frmr;
	struct ib_mr *mr;
	struct ib_reg_wr *reg_wr;
	struct ib_send_wr *bad_wr;
	int rc, i, n, dma_nents;
	u8 key;

	mw = seg1->rl_mw;
	seg1->rl_mw = NULL;
	do {
		if (mw)
			__frwr_queue_recovery(mw);
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return -ENOMEM;
	} while (mw->r.frmr.fr_state != FRMR_IS_INVALID);
	frmr = &mw->r.frmr;
	frmr->fr_state = FRMR_IS_VALID;
	frmr->fr_waiter = false;
	mr = frmr->fr_mr;
	reg_wr = &frmr->fr_regwr;

	if (nsegs > ia->ri_max_frmr_depth)
		nsegs = ia->ri_max_frmr_depth;

	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&frmr->sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&frmr->sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;

		/* Check for holes */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	frmr->sg_nents = i;
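
	/* Coalescing stops at a "hole": a following segment that does not
	 * start on a page boundary, or a preceding one that does not end
	 * on a page boundary, since one fast-registered MR can describe
	 * only a single virtually contiguous range.
	 */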

	dma_nents = ib_dma_map_sg(device, frmr->sg, frmr->sg_nents, direction);
	if (!dma_nents) {
		pr_err("RPC: %s: failed to dma map sg %p sg_nents %u\n",
		       __func__, frmr->sg, frmr->sg_nents);
		return -ENOMEM;
	}

	n = ib_map_mr_sg(mr, frmr->sg, frmr->sg_nents, PAGE_SIZE);
	if (unlikely(n != frmr->sg_nents)) {
		pr_err("RPC: %s: failed to map mr %p (%u/%u)\n",
		       __func__, frmr->fr_mr, n, frmr->sg_nents);
		rc = n < 0 ? n : -EINVAL;
		goto out_senderr;
	}

	dprintk("RPC: %s: Using frmr %p to map %u segments (%u bytes)\n",
		__func__, mw, frmr->sg_nents, mr->length);

	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);
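	/* The low octet of the rkey is its variant "key" portion; bumping
	 * it before each registration means an rkey left over from an
	 * earlier registration of this MR can no longer be used by the
	 * peer.
	 */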

	reg_wr->wr.next = NULL;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	reg_wr->wr.wr_id = (uintptr_t)mw;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = 0;
	reg_wr->mr = mr;
	reg_wr->key = mr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;
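
	/* This registration WR is posted unsignaled (send_flags is 0);
	 * DECR_CQCOUNT charges it against the send CQ budget so the
	 * transport occasionally signals a send before the provider's
	 * queues can overflow (see the header comment at the top of
	 * this file). The exact accounting lives in the generic verbs
	 * code.
	 */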
	DECR_CQCOUNT(&r_xprt->rx_ep);
	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
	if (rc)
		goto out_senderr;

	seg1->mr_dir = direction;
	seg1->rl_mw = mw;
	seg1->mr_rkey = mr->rkey;
	seg1->mr_base = mr->iova;
	seg1->mr_nsegs = frmr->sg_nents;
	seg1->mr_len = mr->length;

	return frmr->sg_nents;

out_senderr:
	dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
	ib_dma_unmap_sg(device, frmr->sg, dma_nents, direction);
	__frwr_queue_recovery(mw);
	return rc;
}

static struct ib_send_wr *
__frwr_prepare_linv_wr(struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_mw *mw = seg->rl_mw;
	struct rpcrdma_frmr *f = &mw->r.frmr;
	struct ib_send_wr *invalidate_wr;

	f->fr_waiter = false;
	f->fr_state = FRMR_IS_INVALID;
	invalidate_wr = &f->fr_invwr;

	memset(invalidate_wr, 0, sizeof(*invalidate_wr));
	invalidate_wr->wr_id = (unsigned long)(void *)mw;
	invalidate_wr->opcode = IB_WR_LOCAL_INV;
	invalidate_wr->ex.invalidate_rkey = f->fr_mr->rkey;

	return invalidate_wr;
}

static void
__frwr_dma_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
		 int rc)
{
	struct ib_device *device = r_xprt->rx_ia.ri_device;
	struct rpcrdma_mw *mw = seg->rl_mw;
	struct rpcrdma_frmr *f = &mw->r.frmr;

	seg->rl_mw = NULL;

	ib_dma_unmap_sg(device, f->sg, f->sg_nents, seg->mr_dir);

	if (!rc)
		rpcrdma_put_mw(r_xprt, mw);
	else
		__frwr_queue_recovery(mw);
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *invalidate_wrs, *pos, *prev, *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mr_seg *seg;
	unsigned int i, nchunks;
	struct rpcrdma_frmr *f;
	int rc;

	dprintk("RPC: %s: req %p\n", __func__, req);

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	invalidate_wrs = pos = prev = NULL;
	seg = NULL;
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];

		pos = __frwr_prepare_linv_wr(seg);

		if (!invalidate_wrs)
			invalidate_wrs = pos;
		else
			prev->next = pos;
		prev = pos;

		i += seg->mr_nsegs;
	}
	f = &seg->rl_mw->r.frmr;

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	f->fr_invwr.send_flags = IB_SEND_SIGNALED;
	f->fr_waiter = true;
	init_completion(&f->fr_linv_done);
	INIT_CQCOUNT(&r_xprt->rx_ep);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr);
	if (rc)
		pr_warn("%s: ib_post_send failed %i\n", __func__, rc);

	wait_for_completion(&f->fr_linv_done);

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];

		__frwr_dma_unmap(r_xprt, seg, rc);

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
	}

	req->rl_nchunks = 0;
}

/* Post a LOCAL_INV Work Request to prevent further remote access
 * via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mw *mw = seg1->rl_mw;
	struct rpcrdma_frmr *frmr = &mw->r.frmr;
	struct ib_send_wr *invalidate_wr, *bad_wr;
	int rc, nsegs = seg->mr_nsegs;

	dprintk("RPC: %s: FRMR %p\n", __func__, mw);
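
	/* As described in the header comment, the MR is marked INVALID
	 * and returned without waiting for the LOCAL_INV to complete;
	 * send queue ordering keeps a later FAST_REG on this MR from
	 * overtaking the invalidation.
	 */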
	seg1->rl_mw = NULL;
	frmr->fr_state = FRMR_IS_INVALID;
	invalidate_wr = &mw->r.frmr.fr_invwr;

	memset(invalidate_wr, 0, sizeof(*invalidate_wr));
	invalidate_wr->wr_id = (uintptr_t)mw;
	invalidate_wr->opcode = IB_WR_LOCAL_INV;
	invalidate_wr->ex.invalidate_rkey = frmr->fr_mr->rkey;
	DECR_CQCOUNT(&r_xprt->rx_ep);

	ib_dma_unmap_sg(ia->ri_device, frmr->sg, frmr->sg_nents, seg1->mr_dir);
	read_lock(&ia->ri_qplock);
	rc = ib_post_send(ia->ri_id->qp, invalidate_wr, &bad_wr);
	read_unlock(&ia->ri_qplock);
	if (rc)
		goto out_err;

	rpcrdma_put_mw(r_xprt, mw);
	return nsegs;

out_err:
	dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
	__frwr_queue_recovery(mw);
	return nsegs;
}

static void
frwr_op_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;

	/* Ensure stale MWs for "buf" are no longer in flight */
	flush_workqueue(frwr_recovery_wq);

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		__frwr_release(r);
		kfree(r);
	}
}
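
/* Memory registration strategy vector handed to the generic xprtrdma
 * code. ro_displayname names this strategy in debugging messages.
 */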
const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map				= frwr_op_map,
	.ro_unmap_sync			= frwr_op_unmap_sync,
	.ro_unmap			= frwr_op_unmap,
	.ro_open			= frwr_op_open,
	.ro_maxpages			= frwr_op_maxpages,
	.ro_init			= frwr_op_init,
	.ro_destroy			= frwr_op_destroy,
	.ro_displayname			= "frwr",
};