/*
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <asm/bitops.h>

#include "xprt_rdma.h"

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
/*
 * handle replies in tasklet context, using a single, global list
 * rdma tasklet function -- just turn around and call the func
 * for all replies on the list
 */
static DEFINE_SPINLOCK(rpcrdma_tk_lock_g);
static LIST_HEAD(rpcrdma_tasklets_g);
static void
rpcrdma_run_tasklet(unsigned long data)
{
	struct rpcrdma_rep *rep;
	void (*func)(struct rpcrdma_rep *);
	unsigned long flags;

	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	while (!list_empty(&rpcrdma_tasklets_g)) {
		rep = list_entry(rpcrdma_tasklets_g.next,
				 struct rpcrdma_rep, rr_list);
		list_del(&rep->rr_list);
		func = rep->rr_func;
		rep->rr_func = NULL;
		spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);

		if (func)
			func(rep);
		else
			rpcrdma_recv_buffer_put(rep);

		spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	}
	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
}
static DECLARE_TASKLET(rpcrdma_tasklet_g, rpcrdma_run_tasklet, 0UL);
static void
rpcrdma_schedule_tasklet(struct rpcrdma_rep *rep)
{
	unsigned long flags;

	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	list_add_tail(&rep->rr_list, &rpcrdma_tasklets_g);
	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
	tasklet_schedule(&rpcrdma_tasklet_g);
}
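/*
 * Reply processing is deferred here: completion upcalls run in interrupt
 * context, so each rpcrdma_rep is queued on the global list above and the
 * tasklet later hands it to its rr_func callback, or returns it to the
 * receive buffer pool when no callback is set.
 */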
static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	dprintk("RPC: %s: QP error %X on device %s ep %p\n",
		__func__, event->event, event->device->name, context);
	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		wake_up_all(&ep->rep_connect_wait);
	}
}
static void
rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	dprintk("RPC: %s: CQ error %X on device %s ep %p\n",
		__func__, event->event, event->device->name, context);
	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		wake_up_all(&ep->rep_connect_wait);
	}
}
static void
rpcrdma_sendcq_process_wc(struct ib_wc *wc)
{
	struct rpcrdma_mw *frmr = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;

	dprintk("RPC: %s: frmr %p status %X opcode %d\n",
		__func__, frmr, wc->status, wc->opcode);

	if (wc->wr_id == 0ULL)
		return;
	if (wc->status != IB_WC_SUCCESS)
		return;

	if (wc->opcode == IB_WC_FAST_REG_MR)
		frmr->r.frmr.state = FRMR_IS_VALID;
	else if (wc->opcode == IB_WC_LOCAL_INV)
		frmr->r.frmr.state = FRMR_IS_INVALID;
}
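/*
 * Tracking FRMR_IS_VALID/FRMR_IS_INVALID from send-side completions lets
 * the registration path (rpcrdma_register_frmr_external below) notice an
 * MR that was left in the VALID state, for example after a flushed
 * invalidate, and post a fresh LOCAL_INV before reusing it.
 */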
static int
rpcrdma_sendcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct ib_wc *wcs;
	int budget, count, rc;

	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
	do {
		wcs = ep->rep_send_wcs;

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			return rc;

		count = rc;
		while (count-- > 0)
			rpcrdma_sendcq_process_wc(wcs++);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	return 0;
}
/*
 * Handle send, fast_reg_mr, and local_inv completions.
 *
 * Send events are typically suppressed and thus do not result
 * in an upcall. Occasionally one is signaled, however. This
 * prevents the provider's completion queue from wrapping and
 * losing a completion.
 */
static void
rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
{
	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
	int rc;

	rc = rpcrdma_sendcq_poll(cq, ep);
	if (rc) {
		dprintk("RPC: %s: ib_poll_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;
	if (rc < 0) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rpcrdma_sendcq_poll(cq, ep);
}
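/*
 * The trailing poll covers the race between the last ib_poll_cq() and
 * re-arming the CQ: with IB_CQ_REPORT_MISSED_EVENTS, a positive return
 * from ib_req_notify_cq() means completions may already be queued, so
 * they are drained here rather than waiting for the next interrupt.
 */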
static void
rpcrdma_recvcq_process_wc(struct ib_wc *wc)
{
	struct rpcrdma_rep *rep =
			(struct rpcrdma_rep *)(unsigned long)wc->wr_id;

	dprintk("RPC: %s: rep %p status %X opcode %X length %u\n",
		__func__, rep, wc->status, wc->opcode, wc->byte_len);

	if (wc->status != IB_WC_SUCCESS) {
		rep->rr_len = ~0U;
		goto out_schedule;
	}
	if (wc->opcode != IB_WC_RECV)
		return;

	rep->rr_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rdmab_to_ia(rep->rr_buffer)->ri_id->device,
			rep->rr_iov.addr, rep->rr_len, DMA_FROM_DEVICE);

	if (rep->rr_len >= 16) {
		struct rpcrdma_msg *p = (struct rpcrdma_msg *)rep->rr_base;
		unsigned int credits = ntohl(p->rm_credit);

		if (credits == 0)
			credits = 1;	/* don't deadlock */
		else if (credits > rep->rr_buffer->rb_max_requests)
			credits = rep->rr_buffer->rb_max_requests;
		atomic_set(&rep->rr_buffer->rb_credits, credits);
	}

out_schedule:
	rpcrdma_schedule_tasklet(rep);
}
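/*
 * rm_credit carries the server's current credit grant: the number of
 * RPC/RDMA requests it is prepared to accept concurrently. It is clamped
 * to at least 1 (so the transport never deadlocks) and to at most
 * rb_max_requests, then published in rb_credits for the send side.
 */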
static int
rpcrdma_recvcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct ib_wc *wcs;
	int budget, count, rc;

	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
	do {
		wcs = ep->rep_recv_wcs;

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			return rc;

		count = rc;
		while (count-- > 0)
			rpcrdma_recvcq_process_wc(wcs++);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	return 0;
}
/*
 * Handle receive completions.
 *
 * It is reentrant but processes single events in order to maintain
 * ordering of receives to keep server credits.
 *
 * It is the responsibility of the scheduled tasklet to return
 * recv buffers to the pool. NOTE: this affects synchronization of
 * connection shutdown. That is, the structures required for
 * the completion of the reply handler must remain intact until
 * all memory has been reclaimed.
 */
static void
rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
{
	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
	int rc;

	rc = rpcrdma_recvcq_poll(cq, ep);
	if (rc) {
		dprintk("RPC: %s: ib_poll_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;
	if (rc < 0) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rpcrdma_recvcq_poll(cq, ep);
}
static const char * const conn[] = {
	/* RDMA CM event names, indexed by enum rdma_cm_event_type */
};
static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
	struct sockaddr_in *addr = (struct sockaddr_in *) &ep->rep_remote_addr;
	struct ib_qp_attr attr;
	struct ib_qp_init_attr iattr;
	int connstate = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC: %s: CM address resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC: %s: CM route resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		connstate = 1;
		ib_query_qp(ia->ri_id->qp, &attr,
			IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
			&iattr);
		dprintk("RPC: %s: %d responder resources"
			" (%d initiator)\n",
			__func__, attr.max_dest_rd_atomic, attr.max_rd_atomic);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		connstate = -ECONNREFUSED;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
		goto connected;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		connstate = -ENODEV;
connected:
		dprintk("RPC: %s: %s: %pI4:%u (ep 0x%p event 0x%x)\n",
			__func__,
			(event->event <= 11) ? conn[event->event] :
						"unknown connection error",
			&addr->sin_addr.s_addr,
			ntohs(addr->sin_port),
			ep, event->event);
		atomic_set(&rpcx_to_rdmax(ep->rep_xprt)->rx_buf.rb_credits, 1);
		dprintk("RPC: %s: %sconnected\n",
			__func__, connstate > 0 ? "" : "dis");
		ep->rep_connected = connstate;
		wake_up_all(&ep->rep_connect_wait);
		break;
	default:
		dprintk("RPC: %s: unexpected CM event %d\n",
			__func__, event->event);
		break;
	}

	if (connstate == 1) {
		int ird = attr.max_dest_rd_atomic;
		int tird = ep->rep_remote_cma.responder_resources;
		printk(KERN_INFO "rpcrdma: connection to %pI4:%u "
			"on %s, memreg %d slots %d ird %d%s\n",
			&addr->sin_addr.s_addr,
			ntohs(addr->sin_port),
			ia->ri_id->device->name,
			ia->ri_memreg_strategy,
			xprt->rx_buf.rb_max_requests,
			ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
	} else if (connstate < 0) {
		printk(KERN_INFO "rpcrdma: connection to %pI4:%u closed (%d)\n",
			&addr->sin_addr.s_addr,
			ntohs(addr->sin_port),
			connstate);
	}

	return 0;
}
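/*
 * connstate summary: 1 means the CM reported ESTABLISHED; a negative
 * errno records why the connection failed or was torn down. The value is
 * stored in ep->rep_connected, which rpcrdma_ep_connect() sleeps on via
 * rep_connect_wait.
 */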
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
			struct rpcrdma_ia *ia, struct sockaddr *addr)
{
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ia->ri_done);

	id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC: %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC: %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC: %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}
/*
 * Drain any cq, prior to teardown.
 */
static void
rpcrdma_clean_cq(struct ib_cq *cq)
{
	struct ib_wc wc;
	int count = 0;

	while (1 == ib_poll_cq(cq, 1, &wc))
		++count;

	if (count)
		dprintk("RPC: %s: flushed %d events (last 0x%x)\n",
			__func__, count, wc.opcode);
}
/*
 * Exported functions.
 */

/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection zone.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
	int rc, mem_priv;
	struct ib_device_attr devattr;
	struct rpcrdma_ia *ia = &xprt->rx_ia;

	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out1;
	}

	ia->ri_pd = ib_alloc_pd(ia->ri_id->device);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		dprintk("RPC: %s: ib_alloc_pd() failed %i\n",
			__func__, rc);
		goto out2;
	}

	/*
	 * Query the device to determine if the requested memory
	 * registration strategy is supported. If it isn't, set the
	 * strategy to a globally supported model.
	 */
	rc = ib_query_device(ia->ri_id->device, &devattr);
	if (rc) {
		dprintk("RPC: %s: ib_query_device failed %d\n",
			__func__, rc);
		goto out2;
	}

	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
		ia->ri_have_dma_lkey = 1;
		ia->ri_dma_lkey = ia->ri_id->device->local_dma_lkey;
	}

	if (memreg == RPCRDMA_FRMR) {
		/* Requires both frmr reg and local dma lkey */
		if ((devattr.device_cap_flags &
		     (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
		    (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) {
			dprintk("RPC: %s: FRMR registration "
				"not supported by HCA\n", __func__);
			memreg = RPCRDMA_MTHCAFMR;
		} else {
			/* Mind the ia limit on FRMR page list depth */
			ia->ri_max_frmr_depth = min_t(unsigned int,
				RPCRDMA_MAX_DATA_SEGS,
				devattr.max_fast_reg_page_list_len);
		}
	}
	if (memreg == RPCRDMA_MTHCAFMR) {
		if (!ia->ri_id->device->alloc_fmr) {
			dprintk("RPC: %s: MTHCAFMR registration "
				"not supported by HCA\n", __func__);
#if RPCRDMA_PERSISTENT_REGISTRATION
			memreg = RPCRDMA_ALLPHYSICAL;
#else
			rc = -ENOMEM;
			goto out2;
#endif
		}
	}

	/*
	 * Optionally obtain an underlying physical identity mapping in
	 * order to do a memory window-based bind. This base registration
	 * is protected from remote access - that is enabled only by binding
	 * for the specific bytes targeted during each RPC operation, and
	 * revoked after the corresponding completion similar to a storage
	 * adapter.
	 */
	switch (memreg) {
	case RPCRDMA_FRMR:
		break;
#if RPCRDMA_PERSISTENT_REGISTRATION
	case RPCRDMA_ALLPHYSICAL:
		mem_priv = IB_ACCESS_LOCAL_WRITE |
				IB_ACCESS_REMOTE_WRITE |
				IB_ACCESS_REMOTE_READ;
		goto register_setup;
#endif
	case RPCRDMA_MTHCAFMR:
		if (ia->ri_have_dma_lkey)
			break;
		mem_priv = IB_ACCESS_LOCAL_WRITE;
#if RPCRDMA_PERSISTENT_REGISTRATION
	register_setup:
#endif
		ia->ri_bind_mem = ib_get_dma_mr(ia->ri_pd, mem_priv);
		if (IS_ERR(ia->ri_bind_mem)) {
			printk(KERN_ALERT "%s: ib_get_dma_mr for "
				"phys register failed with %lX\n",
				__func__, PTR_ERR(ia->ri_bind_mem));
			rc = -ENOMEM;
			goto out2;
		}
		break;
	default:
		printk(KERN_ERR "RPC: Unsupported memory "
				"registration mode: %d\n", memreg);
		rc = -ENOMEM;
		goto out2;
	}
	dprintk("RPC: %s: memory registration strategy is %d\n",
		__func__, memreg);

	/* Else will do memory reg/dereg for each chunk */
	ia->ri_memreg_strategy = memreg;

	return 0;
out2:
	rdma_destroy_id(ia->ri_id);
	ia->ri_id = NULL;
out1:
	return rc;
}
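/*
 * Strategy selection above is a fallback chain: FRMR when the device
 * advertises MEM_MGT_EXTENSIONS plus a local DMA lkey, otherwise FMR if
 * the device has an alloc_fmr method, otherwise (when persistent
 * registration is compiled in) an all-physical DMA MR.
 */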
/*
 * Clean up/close an IA.
 *   o if event handles and PD have been initialized, free them.
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	int rc;

	dprintk("RPC: %s: entering\n", __func__);
	if (ia->ri_bind_mem != NULL) {
		rc = ib_dereg_mr(ia->ri_bind_mem);
		dprintk("RPC: %s: ib_dereg_mr returned %i\n",
			__func__, rc);
	}
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
		ia->ri_id = NULL;
	}
	if (ia->ri_pd != NULL && !IS_ERR(ia->ri_pd)) {
		rc = ib_dealloc_pd(ia->ri_pd);
		dprintk("RPC: %s: ib_dealloc_pd returned %i\n",
			__func__, rc);
	}
}
/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
				struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr devattr;
	struct ib_cq *sendcq, *recvcq;
	int rc, err;

	rc = ib_query_device(ia->ri_id->device, &devattr);
	if (rc) {
		dprintk("RPC: %s: ib_query_device failed %d\n",
			__func__, rc);
		return rc;
	}

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > devattr.max_qp_wr)
		cdata->max_requests = devattr.max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	/* send_cq and recv_cq initialized below */
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR: {
		int depth = 7;

		/* Add room for frmr register and invalidate WRs.
		 * 1. FRMR reg WR for head
		 * 2. FRMR invalidate WR for head
		 * 3. N FRMR reg WRs for pagelist
		 * 4. N FRMR invalidate WRs for pagelist
		 * 5. FRMR reg WR for tail
		 * 6. FRMR invalidate WR for tail
		 * 7. The RDMA_SEND WR
		 */

		/* Calculate N if the device max FRMR depth is smaller than
		 * RPCRDMA_MAX_DATA_SEGS.
		 */
		if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
			int delta = RPCRDMA_MAX_DATA_SEGS -
				    ia->ri_max_frmr_depth;

			do {
				depth += 2; /* FRMR reg + invalidate */
				delta -= ia->ri_max_frmr_depth;
			} while (delta > 0);
		}
		ep->rep_attr.cap.max_send_wr *= depth;
		if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr) {
			cdata->max_requests = devattr.max_qp_wr / depth;
			if (!cdata->max_requests)
				return -EINVAL;
			ep->rep_attr.cap.max_send_wr = cdata->max_requests *
						       depth;
		}
		break;
	}
	default:
		break;
	}
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2);
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
	if (ep->rep_cqinit <= 2)
		ep->rep_cqinit = 0;
	INIT_CQCOUNT(ep);
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	sendcq = ib_create_cq(ia->ri_id->device, rpcrdma_sendcq_upcall,
				  rpcrdma_cq_async_error_upcall, ep,
				  ep->rep_attr.cap.max_send_wr + 1, 0);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC: %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	rc = ib_req_notify_cq(sendcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		goto out2;
	}

	recvcq = ib_create_cq(ia->ri_id->device, rpcrdma_recvcq_upcall,
				  rpcrdma_cq_async_error_upcall, ep,
				  ep->rep_attr.cap.max_recv_wr + 1, 0);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC: %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	rc = ib_req_notify_cq(recvcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		ib_destroy_cq(recvcq);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */

	/* RPC/RDMA does not use private data */
	ep->rep_remote_cma.private_data = NULL;
	ep->rep_remote_cma.private_data_len = 0;

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	if (devattr.max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
		ep->rep_remote_cma.responder_resources = 32;
	else
		ep->rep_remote_cma.responder_resources = devattr.max_qp_rd_atom;

	ep->rep_remote_cma.retry_count = 7;
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	err = ib_destroy_cq(sendcq);
	if (err)
		dprintk("RPC: %s: ib_destroy_cq returned %i\n",
			__func__, err);
out1:
	return rc;
}
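/*
 * rep_cqinit is the send-completion signaling interval: rpcrdma_ep_post()
 * normally posts unsignaled sends and requests a signaled completion only
 * when DECR_CQCOUNT() reaches zero, so the provider's send CQ is drained
 * periodically instead of on every RPC.
 */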
/*
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	dprintk("RPC: %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id->qp) {
		rc = rpcrdma_ep_disconnect(ep, ia);
		if (rc)
			dprintk("RPC: %s: rpcrdma_ep_disconnect"
				" returned %i\n", __func__, rc);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	/* padding - could be done in rpcrdma_buffer_destroy... */
	if (ep->rep_pad_mr) {
		rpcrdma_deregister_internal(ia, ep->rep_pad_mr, &ep->rep_pad);
		ep->rep_pad_mr = NULL;
	}

	rpcrdma_clean_cq(ep->rep_attr.recv_cq);
	rc = ib_destroy_cq(ep->rep_attr.recv_cq);
	if (rc)
		dprintk("RPC: %s: ib_destroy_cq returned %i\n",
			__func__, rc);

	rpcrdma_clean_cq(ep->rep_attr.send_cq);
	rc = ib_destroy_cq(ep->rep_attr.send_cq);
	if (rc)
		dprintk("RPC: %s: ib_destroy_cq returned %i\n",
			__func__, rc);
}
/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id;
	int rc = 0;
	int retry_count = 0;

	if (ep->rep_connected != 0) {
		struct rpcrdma_xprt *xprt;
retry:
		dprintk("RPC: %s: reconnecting...\n", __func__);
		rc = rpcrdma_ep_disconnect(ep, ia);
		if (rc && rc != -ENOTCONN)
			dprintk("RPC: %s: rpcrdma_ep_disconnect"
				" status %i\n", __func__, rc);

		rpcrdma_clean_cq(ep->rep_attr.recv_cq);
		rpcrdma_clean_cq(ep->rep_attr.send_cq);

		xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		id = rpcrdma_create_id(xprt, ia,
				(struct sockaddr *)&xprt->rx_data.addr);
		if (IS_ERR(id)) {
			rc = PTR_ERR(id);
			goto out;
		}
		/* TEMP TEMP TEMP - fail if new device:
		 * Deregister/remarshal *all* requests!
		 * Close and recreate adapter, pd, etc!
		 * Re-determine all attributes still sane!
		 * More stuff I haven't thought of!
		 */
		if (ia->ri_id->device != id->device) {
			printk("RPC: %s: can't reconnect on "
				"different device!\n", __func__);
			rdma_destroy_id(id);
			rc = -ENETDOWN;
			goto out;
		}
		rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC: %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rdma_destroy_id(id);
			rc = -ENETDOWN;
			goto out;
		}
		rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
		ia->ri_id = id;
	} else {
		dprintk("RPC: %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC: %s: rdma_create_qp failed %i\n",
				__func__, rc);
			/* do not update ep->rep_connected */
			return -ENETUNREACH;
		}
	}

	ep->rep_connected = 0;

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC: %s: rdma_connect() failed with %i\n",
			__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);

	/*
	 * Check state. A non-peer reject indicates no listener
	 * (ECONNREFUSED), which may be a transient state. All
	 * others indicate a transport condition which has already
	 * undergone a best-effort.
	 */
	if (ep->rep_connected == -ECONNREFUSED &&
	    ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
		dprintk("RPC: %s: non-peer_reject, retry\n", __func__);
		goto retry;
	}
	if (ep->rep_connected <= 0) {
		/* Sometimes, the only way to reliably connect to remote
		 * CMs is to use same nonzero values for ORD and IRD. */
		if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 &&
		    (ep->rep_remote_cma.responder_resources == 0 ||
		     ep->rep_remote_cma.initiator_depth !=
				ep->rep_remote_cma.responder_resources)) {
			if (ep->rep_remote_cma.responder_resources == 0)
				ep->rep_remote_cma.responder_resources = 1;
			ep->rep_remote_cma.initiator_depth =
				ep->rep_remote_cma.responder_resources;
			goto retry;
		}
		rc = ep->rep_connected;
	} else {
		dprintk("RPC: %s: connected\n", __func__);
	}

out:
	if (rc)
		ep->rep_connected = rc;
	return rc;
}
/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
int
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rpcrdma_clean_cq(ep->rep_attr.recv_cq);
	rpcrdma_clean_cq(ep->rep_attr.send_cq);
	rc = rdma_disconnect(ia->ri_id);
	if (!rc) {
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
						ep->rep_connected != 1);
		dprintk("RPC: %s: after wait, %sconnected\n", __func__,
			(ep->rep_connected == 1) ? "still " : "dis");
	} else {
		dprintk("RPC: %s: rdma_disconnect %i\n", __func__, rc);
		ep->rep_connected = rc;
	}
	return rc;
}
/*
 * Initialize buffer memory
 */
int
rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
	struct rpcrdma_ia *ia, struct rpcrdma_create_data_internal *cdata)
{
	char *p;
	size_t len, rlen, wlen;
	int i, rc;
	struct rpcrdma_mw *r;

	buf->rb_max_requests = cdata->max_requests;
	spin_lock_init(&buf->rb_lock);
	atomic_set(&buf->rb_credits, 1);

	/* Need to allocate:
	 *   1.  arrays for send and recv pointers
	 *   2.  arrays of struct rpcrdma_req to fill in pointers
	 *   3.  array of struct rpcrdma_rep for replies
	 *   4.  padding, if any
	 *   5.  mw's, fmr's or frmr's, if any
	 * Send/recv buffers in req/rep need to be registered
	 */
	len = buf->rb_max_requests *
		(sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *));
	len += cdata->padding;
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR:
		len += buf->rb_max_requests * RPCRDMA_MAX_SEGS *
				sizeof(struct rpcrdma_mw);
		break;
	case RPCRDMA_MTHCAFMR:
		/* TBD we are perhaps overallocating here */
		len += (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS *
				sizeof(struct rpcrdma_mw);
		break;
	default:
		break;
	}

	/* allocate 1, 4 and 5 in one shot */
	p = kzalloc(len, GFP_KERNEL);
	if (p == NULL) {
		dprintk("RPC: %s: req_t/rep_t/pad kzalloc(%zd) failed\n",
			__func__, len);
		rc = -ENOMEM;
		goto out;
	}
	buf->rb_pool = p;	/* for freeing it later */

	buf->rb_send_bufs = (struct rpcrdma_req **) p;
	p = (char *) &buf->rb_send_bufs[buf->rb_max_requests];
	buf->rb_recv_bufs = (struct rpcrdma_rep **) p;
	p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests];

	/*
	 * Register the zeroed pad buffer, if any.
	 */
	if (cdata->padding) {
		rc = rpcrdma_register_internal(ia, p, cdata->padding,
					    &ep->rep_pad_mr, &ep->rep_pad);
		if (rc)
			goto out;
	}
	p += cdata->padding;

	INIT_LIST_HEAD(&buf->rb_mws);
	r = (struct rpcrdma_mw *)p;
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR:
		for (i = buf->rb_max_requests * RPCRDMA_MAX_SEGS; i; i--) {
			r->r.frmr.fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd,
						ia->ri_max_frmr_depth);
			if (IS_ERR(r->r.frmr.fr_mr)) {
				rc = PTR_ERR(r->r.frmr.fr_mr);
				dprintk("RPC: %s: ib_alloc_fast_reg_mr"
					" failed %i\n", __func__, rc);
				goto out;
			}
			r->r.frmr.fr_pgl = ib_alloc_fast_reg_page_list(
						ia->ri_id->device,
						ia->ri_max_frmr_depth);
			if (IS_ERR(r->r.frmr.fr_pgl)) {
				rc = PTR_ERR(r->r.frmr.fr_pgl);
				dprintk("RPC: %s: "
					"ib_alloc_fast_reg_page_list "
					"failed %i\n", __func__, rc);
				ib_dereg_mr(r->r.frmr.fr_mr);
				goto out;
			}
			list_add(&r->mw_list, &buf->rb_mws);
			++r;
		}
		break;
	case RPCRDMA_MTHCAFMR:
		/* TBD we are perhaps overallocating here */
		for (i = (buf->rb_max_requests+1) * RPCRDMA_MAX_SEGS; i; i--) {
			static struct ib_fmr_attr fa =
				{ RPCRDMA_MAX_DATA_SEGS, 1, PAGE_SHIFT };
			r->r.fmr = ib_alloc_fmr(ia->ri_pd,
				IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ,
				&fa);
			if (IS_ERR(r->r.fmr)) {
				rc = PTR_ERR(r->r.fmr);
				dprintk("RPC: %s: ib_alloc_fmr"
					" failed %i\n", __func__, rc);
				goto out;
			}
			list_add(&r->mw_list, &buf->rb_mws);
			++r;
		}
		break;
	default:
		break;
	}
	/*
	 * Allocate/init the request/reply buffers. Doing this
	 * using kmalloc for now -- one for each buf.
	 */
	wlen = 1 << fls(cdata->inline_wsize + sizeof(struct rpcrdma_req));
	rlen = 1 << fls(cdata->inline_rsize + sizeof(struct rpcrdma_rep));
	dprintk("RPC: %s: wlen = %zu, rlen = %zu\n",
		__func__, wlen, rlen);

	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;
		struct rpcrdma_rep *rep;

		req = kmalloc(wlen, GFP_KERNEL);
		if (req == NULL) {
			dprintk("RPC: %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = -ENOMEM;
			goto out;
		}
		memset(req, 0, sizeof(struct rpcrdma_req));
		buf->rb_send_bufs[i] = req;
		buf->rb_send_bufs[i]->rl_buffer = buf;

		rc = rpcrdma_register_internal(ia, req->rl_base,
				wlen - offsetof(struct rpcrdma_req, rl_base),
				&buf->rb_send_bufs[i]->rl_handle,
				&buf->rb_send_bufs[i]->rl_iov);
		if (rc)
			goto out;

		buf->rb_send_bufs[i]->rl_size = wlen -
					sizeof(struct rpcrdma_req);

		rep = kmalloc(rlen, GFP_KERNEL);
		if (rep == NULL) {
			dprintk("RPC: %s: reply buffer %d alloc failed\n",
				__func__, i);
			rc = -ENOMEM;
			goto out;
		}
		memset(rep, 0, sizeof(struct rpcrdma_rep));
		buf->rb_recv_bufs[i] = rep;
		buf->rb_recv_bufs[i]->rr_buffer = buf;

		rc = rpcrdma_register_internal(ia, rep->rr_base,
				rlen - offsetof(struct rpcrdma_rep, rr_base),
				&buf->rb_recv_bufs[i]->rr_handle,
				&buf->rb_recv_bufs[i]->rr_iov);
		if (rc)
			goto out;
	}
	dprintk("RPC: %s: max_requests %d\n",
		__func__, buf->rb_max_requests);
	/* done */
	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}
/*
 * Unregister and destroy buffer memory. Need to deal with
 * partial initialization, so it's callable from failed create.
 * Must be called before destroying endpoint, as registrations
 * reference it.
 */
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	int rc, i;
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	struct rpcrdma_mw *r;

	/* clean up in reverse order from create
	 *   1.  recv mr memory (mr free, then kfree)
	 *   2.  send mr memory (mr free, then kfree)
	 *   3.  padding (if any) [moved to rpcrdma_ep_destroy]
	 */
	dprintk("RPC: %s: entering\n", __func__);

	for (i = 0; i < buf->rb_max_requests; i++) {
		if (buf->rb_recv_bufs && buf->rb_recv_bufs[i]) {
			rpcrdma_deregister_internal(ia,
					buf->rb_recv_bufs[i]->rr_handle,
					&buf->rb_recv_bufs[i]->rr_iov);
			kfree(buf->rb_recv_bufs[i]);
		}
		if (buf->rb_send_bufs && buf->rb_send_bufs[i]) {
			rpcrdma_deregister_internal(ia,
					buf->rb_send_bufs[i]->rl_handle,
					&buf->rb_send_bufs[i]->rl_iov);
			kfree(buf->rb_send_bufs[i]);
		}
	}

	while (!list_empty(&buf->rb_mws)) {
		r = list_entry(buf->rb_mws.next,
			struct rpcrdma_mw, mw_list);
		list_del(&r->mw_list);
		switch (ia->ri_memreg_strategy) {
		case RPCRDMA_FRMR:
			rc = ib_dereg_mr(r->r.frmr.fr_mr);
			if (rc)
				dprintk("RPC: %s: ib_dereg_mr failed %i\n",
					__func__, rc);
			ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);
			break;
		case RPCRDMA_MTHCAFMR:
			rc = ib_dealloc_fmr(r->r.fmr);
			if (rc)
				dprintk("RPC: %s: ib_dealloc_fmr failed %i\n",
					__func__, rc);
			break;
		default:
			break;
		}
	}

	kfree(buf->rb_pool);
}
/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if needed) is attached to send buffer upon return.
 *
 * rb_send_index and rb_recv_index MUST always be pointing to the
 * *next* available buffer (non-NULL). They are incremented after
 * removing buffers, and decremented *before* returning them.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;
	unsigned long flags;
	int i;
	struct rpcrdma_mw *r;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	if (buffers->rb_send_index == buffers->rb_max_requests) {
		spin_unlock_irqrestore(&buffers->rb_lock, flags);
		dprintk("RPC: %s: out of request buffers\n", __func__);
		return ((struct rpcrdma_req *)NULL);
	}

	req = buffers->rb_send_bufs[buffers->rb_send_index];
	if (buffers->rb_send_index < buffers->rb_recv_index) {
		dprintk("RPC: %s: %d extra receives outstanding (ok)\n",
			__func__,
			buffers->rb_recv_index - buffers->rb_send_index);
		req->rl_reply = NULL;
	} else {
		req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
	}
	buffers->rb_send_bufs[buffers->rb_send_index++] = NULL;
	if (!list_empty(&buffers->rb_mws)) {
		i = RPCRDMA_MAX_SEGS - 1;
		do {
			r = list_entry(buffers->rb_mws.next,
					struct rpcrdma_mw, mw_list);
			list_del(&r->mw_list);
			req->rl_segments[i].mr_chunk.rl_mw = r;
		} while (--i >= 0);
	}
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
	return req;
}
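/*
 * Note that the request leaves here fully provisioned: a reply buffer is
 * attached unless extra receives are already posted, and (for FRMR/FMR
 * modes) one rpcrdma_mw per possible segment is moved from rb_mws into
 * rl_segments while rb_lock is held, so marshaling never has to allocate.
 */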
/*
 * Put request/reply buffers back into pool.
 * Pre-decrement counter/array index.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	buffers->rb_send_bufs[--buffers->rb_send_index] = req;
	if (req->rl_reply) {
		buffers->rb_recv_bufs[--buffers->rb_recv_index] = req->rl_reply;
		req->rl_reply->rr_func = NULL;
		req->rl_reply = NULL;
	}
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR:
	case RPCRDMA_MTHCAFMR:
		/*
		 * Cycle mw's back in reverse order, and "spin" them.
		 * This delays and scrambles reuse as much as possible.
		 */
		i = 1;
		do {
			struct rpcrdma_mw **mw;
			mw = &req->rl_segments[i].mr_chunk.rl_mw;
			list_add_tail(&(*mw)->mw_list, &buffers->rb_mws);
			*mw = NULL;
		} while (++i < RPCRDMA_MAX_SEGS);
		list_add_tail(&req->rl_segments[0].mr_chunk.rl_mw->mw_list,
					&buffers->rb_mws);
		req->rl_segments[0].mr_chunk.rl_mw = NULL;
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}
/*
 * Recover reply buffers from pool.
 * This happens when recovering from error conditions.
 * Post-increment counter/array index.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	unsigned long flags;

	if (req->rl_iov.length == 0)	/* special case xprt_rdma_allocate() */
		buffers = ((struct rpcrdma_req *) buffers)->rl_buffer;
	spin_lock_irqsave(&buffers->rb_lock, flags);
	if (buffers->rb_recv_index < buffers->rb_max_requests) {
		req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
	}
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}
/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = rep->rr_buffer;
	unsigned long flags;

	rep->rr_func = NULL;
	spin_lock_irqsave(&buffers->rb_lock, flags);
	buffers->rb_recv_bufs[--buffers->rb_recv_index] = rep;
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}
/*
 * Wrappers for internal-use kmalloc memory registration, used by buffer code.
 */

int
rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len,
				struct ib_mr **mrp, struct ib_sge *iov)
{
	struct ib_phys_buf ipb;
	struct ib_mr *mr;
	int rc;

	/*
	 * All memory passed here was kmalloc'ed, therefore phys-contiguous.
	 */
	iov->addr = ib_dma_map_single(ia->ri_id->device,
			va, len, DMA_BIDIRECTIONAL);
	iov->length = len;

	if (ia->ri_have_dma_lkey) {
		*mrp = NULL;
		iov->lkey = ia->ri_dma_lkey;
		return 0;
	} else if (ia->ri_bind_mem != NULL) {
		*mrp = NULL;
		iov->lkey = ia->ri_bind_mem->lkey;
		return 0;
	}

	ipb.addr = iov->addr;
	ipb.size = iov->length;
	mr = ib_reg_phys_mr(ia->ri_pd, &ipb, 1,
			IB_ACCESS_LOCAL_WRITE, &iov->addr);

	dprintk("RPC: %s: phys convert: 0x%llx "
			"registered 0x%llx length %d\n",
			__func__, (unsigned long long)ipb.addr,
			(unsigned long long)iov->addr, len);

	if (IS_ERR(mr)) {
		rc = PTR_ERR(mr);
		dprintk("RPC: %s: failed with %i\n", __func__, rc);
		return rc;
	}

	*mrp = mr;
	iov->lkey = mr->lkey;
	return 0;
}

int
rpcrdma_deregister_internal(struct rpcrdma_ia *ia,
				struct ib_mr *mr, struct ib_sge *iov)
{
	int rc;

	ib_dma_unmap_single(ia->ri_id->device,
			iov->addr, iov->length, DMA_BIDIRECTIONAL);

	if (NULL == mr)
		return 0;

	rc = ib_dereg_mr(mr);
	if (rc)
		dprintk("RPC: %s: ib_dereg_mr failed %i\n", __func__, rc);
	return rc;
}
/*
 * Wrappers for chunk registration, shared by read/write chunk code.
 */

static void
rpcrdma_map_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg, int writing)
{
	seg->mr_dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	seg->mr_dmalen = seg->mr_len;
	if (seg->mr_page)
		seg->mr_dma = ib_dma_map_page(ia->ri_id->device,
				seg->mr_page, offset_in_page(seg->mr_offset),
				seg->mr_dmalen, seg->mr_dir);
	else
		seg->mr_dma = ib_dma_map_single(ia->ri_id->device,
				seg->mr_offset,
				seg->mr_dmalen, seg->mr_dir);
	if (ib_dma_mapping_error(ia->ri_id->device, seg->mr_dma)) {
		dprintk("RPC: %s: mr_dma %llx mr_offset %p mr_dma_len %zu\n",
			__func__,
			(unsigned long long)seg->mr_dma,
			seg->mr_offset, seg->mr_dmalen);
	}
}

static void
rpcrdma_unmap_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg)
{
	if (seg->mr_page)
		ib_dma_unmap_page(ia->ri_id->device,
				seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
	else
		ib_dma_unmap_single(ia->ri_id->device,
				seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
}
static int
rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
			int *nsegs, int writing, struct rpcrdma_ia *ia,
			struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	struct ib_send_wr invalidate_wr, frmr_wr, *bad_wr, *post_wr;
	u8 key;
	int len, pageoff;
	int i, rc;
	int seg_len;
	u64 pa;
	int page_no;

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (*nsegs > ia->ri_max_frmr_depth)
		*nsegs = ia->ri_max_frmr_depth;
	for (page_no = i = 0; i < *nsegs;) {
		rpcrdma_map_one(ia, seg, writing);
		pa = seg->mr_dma;
		for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) {
			seg1->mr_chunk.rl_mw->r.frmr.fr_pgl->
				page_list[page_no++] = pa;
			pa += PAGE_SIZE;
		}
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes */
		if ((i < *nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	dprintk("RPC: %s: Using frmr %p to map %d segments\n",
		__func__, seg1->mr_chunk.rl_mw, i);

	if (unlikely(seg1->mr_chunk.rl_mw->r.frmr.state == FRMR_IS_VALID)) {
		dprintk("RPC: %s: frmr %x left valid, posting invalidate.\n",
			__func__,
			seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey);
		/* Invalidate before using. */
		memset(&invalidate_wr, 0, sizeof invalidate_wr);
		invalidate_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw;
		invalidate_wr.next = &frmr_wr;
		invalidate_wr.opcode = IB_WR_LOCAL_INV;
		invalidate_wr.send_flags = IB_SEND_SIGNALED;
		invalidate_wr.ex.invalidate_rkey =
			seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
		DECR_CQCOUNT(&r_xprt->rx_ep);
		post_wr = &invalidate_wr;
	} else
		post_wr = &frmr_wr;

	/* Prepare FRMR WR */
	memset(&frmr_wr, 0, sizeof frmr_wr);
	frmr_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw;
	frmr_wr.opcode = IB_WR_FAST_REG_MR;
	frmr_wr.send_flags = IB_SEND_SIGNALED;
	frmr_wr.wr.fast_reg.iova_start = seg1->mr_dma;
	frmr_wr.wr.fast_reg.page_list = seg1->mr_chunk.rl_mw->r.frmr.fr_pgl;
	frmr_wr.wr.fast_reg.page_list_len = page_no;
	frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	frmr_wr.wr.fast_reg.length = page_no << PAGE_SHIFT;
	if (frmr_wr.wr.fast_reg.length < len) {
		while (seg1->mr_nsegs--)
			rpcrdma_unmap_one(ia, seg++);
		return -EIO;
	}

	/* Bump the key */
	key = (u8)(seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(seg1->mr_chunk.rl_mw->r.frmr.fr_mr, ++key);

	frmr_wr.wr.fast_reg.access_flags = (writing ?
				IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
				IB_ACCESS_REMOTE_READ);
	frmr_wr.wr.fast_reg.rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
	DECR_CQCOUNT(&r_xprt->rx_ep);

	rc = ib_post_send(ia->ri_id->qp, post_wr, &bad_wr);

	if (rc) {
		dprintk("RPC: %s: failed ib_post_send for register,"
			" status %i\n", __func__, rc);
		while (i--)
			rpcrdma_unmap_one(ia, --seg);
	} else {
		seg1->mr_rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
		seg1->mr_base = seg1->mr_dma + pageoff;
		seg1->mr_nsegs = i;
		seg1->mr_len = len;
	}
	*nsegs = i;
	return rc;
}
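/*
 * The rkey's low byte is incremented before each FAST_REG so that a
 * remote peer holding a previously advertised rkey cannot hit the newly
 * registered mapping; only the key generated here is handed out in the
 * chunk lists for this RPC.
 */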
static int
rpcrdma_deregister_frmr_external(struct rpcrdma_mr_seg *seg,
			struct rpcrdma_ia *ia, struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	struct ib_send_wr invalidate_wr, *bad_wr;
	int rc;

	while (seg1->mr_nsegs--)
		rpcrdma_unmap_one(ia, seg++);

	memset(&invalidate_wr, 0, sizeof invalidate_wr);
	invalidate_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw;
	invalidate_wr.opcode = IB_WR_LOCAL_INV;
	invalidate_wr.send_flags = IB_SEND_SIGNALED;
	invalidate_wr.ex.invalidate_rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
	DECR_CQCOUNT(&r_xprt->rx_ep);

	rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
	if (rc)
		dprintk("RPC: %s: failed ib_post_send for invalidate,"
			" status %i\n", __func__, rc);
	return rc;
}
static int
rpcrdma_register_fmr_external(struct rpcrdma_mr_seg *seg,
			int *nsegs, int writing, struct rpcrdma_ia *ia)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	u64 physaddrs[RPCRDMA_MAX_DATA_SEGS];
	int len, pageoff, i, rc;

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (*nsegs > RPCRDMA_MAX_DATA_SEGS)
		*nsegs = RPCRDMA_MAX_DATA_SEGS;
	for (i = 0; i < *nsegs;) {
		rpcrdma_map_one(ia, seg, writing);
		physaddrs[i] = seg->mr_dma;
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes */
		if ((i < *nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	rc = ib_map_phys_fmr(seg1->mr_chunk.rl_mw->r.fmr,
				physaddrs, i, seg1->mr_dma);
	if (rc) {
		dprintk("RPC: %s: failed ib_map_phys_fmr "
			"%u@0x%llx+%i (%d)... status %i\n", __func__,
			len, (unsigned long long)seg1->mr_dma,
			pageoff, i, rc);
		while (i--)
			rpcrdma_unmap_one(ia, --seg);
	} else {
		seg1->mr_rkey = seg1->mr_chunk.rl_mw->r.fmr->rkey;
		seg1->mr_base = seg1->mr_dma + pageoff;
		seg1->mr_nsegs = i;
		seg1->mr_len = len;
	}
	*nsegs = i;
	return rc;
}
static int
rpcrdma_deregister_fmr_external(struct rpcrdma_mr_seg *seg,
			struct rpcrdma_ia *ia)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	LIST_HEAD(l);
	int rc;

	list_add(&seg1->mr_chunk.rl_mw->r.fmr->list, &l);
	rc = ib_unmap_fmr(&l);
	while (seg1->mr_nsegs--)
		rpcrdma_unmap_one(ia, seg++);
	if (rc)
		dprintk("RPC: %s: failed ib_unmap_fmr,"
			" status %i\n", __func__, rc);
	return rc;
}
int
rpcrdma_register_external(struct rpcrdma_mr_seg *seg,
			int nsegs, int writing, struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int rc = 0;

	switch (ia->ri_memreg_strategy) {

#if RPCRDMA_PERSISTENT_REGISTRATION
	case RPCRDMA_ALLPHYSICAL:
		rpcrdma_map_one(ia, seg, writing);
		seg->mr_rkey = ia->ri_bind_mem->rkey;
		seg->mr_base = seg->mr_dma;
		seg->mr_nsegs = 1;
		nsegs = 1;
		break;
#endif

	/* Registration using frmr registration */
	case RPCRDMA_FRMR:
		rc = rpcrdma_register_frmr_external(seg, &nsegs, writing, ia, r_xprt);
		break;

	/* Registration using fmr memory registration */
	case RPCRDMA_MTHCAFMR:
		rc = rpcrdma_register_fmr_external(seg, &nsegs, writing, ia);
		break;

	default:
		return -1;
	}
	if (rc)
		return -1;

	return nsegs;
}
int
rpcrdma_deregister_external(struct rpcrdma_mr_seg *seg,
		struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int nsegs = seg->mr_nsegs, rc;

	switch (ia->ri_memreg_strategy) {

#if RPCRDMA_PERSISTENT_REGISTRATION
	case RPCRDMA_ALLPHYSICAL:
		rpcrdma_unmap_one(ia, seg);
		break;
#endif

	case RPCRDMA_FRMR:
		rc = rpcrdma_deregister_frmr_external(seg, ia, r_xprt);
		break;

	case RPCRDMA_MTHCAFMR:
		rc = rpcrdma_deregister_fmr_external(seg, ia);
		break;

	default:
		break;
	}
	return nsegs;
}
/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr send_wr, *send_wr_fail;
	struct rpcrdma_rep *rep = req->rl_reply;
	int rc;

	if (rep) {
		rc = rpcrdma_ep_post_recv(ia, ep, rep);
		if (rc)
			goto out;
		req->rl_reply = NULL;
	}

	send_wr.next = NULL;
	send_wr.wr_id = 0ULL;	/* no send cookie */
	send_wr.sg_list = req->rl_send_iov;
	send_wr.num_sge = req->rl_niovs;
	send_wr.opcode = IB_WR_SEND;
	if (send_wr.num_sge == 4)	/* no need to sync any pad (constant) */
		ib_dma_sync_single_for_device(ia->ri_id->device,
			req->rl_send_iov[3].addr, req->rl_send_iov[3].length,
			DMA_TO_DEVICE);
	ib_dma_sync_single_for_device(ia->ri_id->device,
		req->rl_send_iov[1].addr, req->rl_send_iov[1].length,
		DMA_TO_DEVICE);
	ib_dma_sync_single_for_device(ia->ri_id->device,
		req->rl_send_iov[0].addr, req->rl_send_iov[0].length,
		DMA_TO_DEVICE);

	if (DECR_CQCOUNT(ep) > 0)
		send_wr.send_flags = 0;
	else {	/* Provider must take a send completion every now and then */
		INIT_CQCOUNT(ep);
		send_wr.send_flags = IB_SEND_SIGNALED;
	}

	rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
	if (rc)
		dprintk("RPC: %s: ib_post_send returned %i\n", __func__,
			rc);
out:
	return rc;
}
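/*
 * Posting the receive first guarantees a reply can never arrive at the
 * adapter without a receive buffer already available for it. The DMA
 * syncs above hand the marshaled send iovecs to the device before the
 * SEND work request is posted.
 */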
/*
 * (Re)post a receive buffer.
 */
int
rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
		     struct rpcrdma_ep *ep,
		     struct rpcrdma_rep *rep)
{
	struct ib_recv_wr recv_wr, *recv_wr_fail;
	int rc;

	recv_wr.next = NULL;
	recv_wr.wr_id = (u64) (unsigned long) rep;
	recv_wr.sg_list = &rep->rr_iov;
	recv_wr.num_sge = 1;

	ib_dma_sync_single_for_cpu(ia->ri_id->device,
		rep->rr_iov.addr, rep->rr_iov.length, DMA_BIDIRECTIONAL);

	rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);

	if (rc)
		dprintk("RPC: %s: ib_post_recv returned %i\n", __func__,
			rc);
	return rc;
}
,