/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/export.h>
#include "xprt_rdma.h"
#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static int svc_rdma_secure_port(struct svc_rqst *);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);
static DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);
static struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
	.xpo_secure_port = svc_rdma_secure_port,
};
struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
	.xcl_ident = XPRT_TRANSPORT_RDMA,
};
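
/*
 * Allocate a per-WR completion context from the slab cache. If the
 * allocation fails, wait half a second and retry rather than failing
 * the request; this must therefore be called from process context.
 */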
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt;

	while (1) {
		ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
		if (ctxt)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	ctxt->xprt = xprt;
	INIT_LIST_HEAD(&ctxt->dto_q);
	ctxt->count = 0;
	ctxt->frmr = NULL;
	atomic_inc(&xprt->sc_ctxt_used);
	return ctxt;
}
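
/*
 * Walk a context's SGE list and release any DMA mappings that were
 * created with the local DMA lkey.
 */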
void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;

	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
		/*
		 * Unmap the DMA addr in the SGE if the lkey matches
		 * the sc_dma_lkey, otherwise, ignore it since it is
		 * an FRMR lkey and will be unmapped later when the
		 * last WR that uses it completes.
		 */
		if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
			atomic_dec(&xprt->sc_dma_used);
			ib_dma_unmap_page(xprt->sc_cm_id->device,
					  ctxt->sge[i].addr,
					  ctxt->sge[i].length,
					  ctxt->direction);
		}
	}
}
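
/*
 * Return a completion context to the slab cache, optionally dropping
 * the references on the pages it describes.
 */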
void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
	struct svcxprt_rdma *xprt;
	int i;

	xprt = ctxt->xprt;
	if (free_pages)
		for (i = 0; i < ctxt->count; i++)
			put_page(ctxt->pages[i]);

	kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
	atomic_dec(&xprt->sc_ctxt_used);
}
/*
 * Temporary NFS req mappings are shared across all transport
 * instances. These are short lived and should be bounded by the number
 * of concurrent server threads * depth of the SQ.
 */
struct svc_rdma_req_map *svc_rdma_get_req_map(void)
{
	struct svc_rdma_req_map *map;

	while (1) {
		map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
		if (map)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	map->count = 0;
	return map;
}

void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
	kmem_cache_free(svc_rdma_map_cachep, map);
}
/* ib_cq event handler */
static void cq_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	dprintk("svcrdma: received CQ event id=%d, context=%p\n",
		event->event, context);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
}
/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		dprintk("svcrdma: QP event %d received for QP=%p\n",
			event->event, event->element.qp);
		break;
	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
			"closing transport\n",
			event->event, event->element.qp);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		break;
	}
}
/*
 * Data Transfer Operation Tasklet
 *
 * Walks a list of transports with I/O pending, removing entries as
 * they are added to the server's I/O pending list. Two bits indicate
 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
 * spinlock that serializes access to the transport list with the RQ
 * and SQ interrupt handlers.
 */
static void dto_tasklet_func(unsigned long data)
{
	struct svcxprt_rdma *xprt;
	unsigned long flags;

	spin_lock_irqsave(&dto_lock, flags);
	while (!list_empty(&dto_xprt_q)) {
		xprt = list_entry(dto_xprt_q.next,
				  struct svcxprt_rdma, sc_dto_q);
		list_del_init(&xprt->sc_dto_q);
		spin_unlock_irqrestore(&dto_lock, flags);

		rq_cq_reap(xprt);
		sq_cq_reap(xprt);

		svc_xprt_put(&xprt->sc_xprt);
		spin_lock_irqsave(&dto_lock, flags);
	}
	spin_unlock_irqrestore(&dto_lock, flags);
}
/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called in interrupt context, we
 * need to defer the handling of the I/O to a tasklet.
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an SQ
	 * completion.
	 */
	set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}
/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WCs off the CQ and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
	int ret;
	struct ib_wc wc;
	struct svc_rdma_op_ctxt *ctxt = NULL;

	if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_rq_poll);

	while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		ctxt->wc_status = wc.status;
		ctxt->byte_len = wc.byte_len;
		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS) {
			/* Close the transport */
			dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_put_context(ctxt, 1);
			svc_xprt_put(&xprt->sc_xprt);
			continue;
		}
		spin_lock_bh(&xprt->sc_rq_dto_lock);
		list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
		spin_unlock_bh(&xprt->sc_rq_dto_lock);
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_rq_prod);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	/*
	 * If data arrived before established event,
	 * don't enqueue. This defers RPC I/O until the
	 * RDMA connection is complete.
	 */
	if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		svc_xprt_enqueue(&xprt->sc_xprt);
}
/*
 * Process a completion context
 */
static void process_context(struct svcxprt_rdma *xprt,
			    struct svc_rdma_op_ctxt *ctxt)
{
	svc_rdma_unmap_dma(ctxt);

	switch (ctxt->wr_op) {
	case IB_WR_SEND:
		if (ctxt->frmr)
			pr_err("svcrdma: SEND: ctxt->frmr != NULL\n");
		svc_rdma_put_context(ctxt, 1);
		break;

	case IB_WR_RDMA_WRITE:
		if (ctxt->frmr)
			pr_err("svcrdma: WRITE: ctxt->frmr != NULL\n");
		svc_rdma_put_context(ctxt, 0);
		break;

	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		svc_rdma_put_frmr(xprt, ctxt->frmr);
		if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
			struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;

			if (read_hdr) {
				spin_lock_bh(&xprt->sc_rq_dto_lock);
				set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
				list_add_tail(&read_hdr->dto_q,
					      &xprt->sc_read_complete_q);
				spin_unlock_bh(&xprt->sc_rq_dto_lock);
			} else {
				pr_err("svcrdma: ctxt->read_hdr == NULL\n");
			}
			svc_xprt_enqueue(&xprt->sc_xprt);
		}
		svc_rdma_put_context(ctxt, 0);
		break;

	default:
		printk(KERN_ERR "svcrdma: unexpected completion type, "
		       "opcode=%d\n",
		       ctxt->wr_op);
		break;
	}
}
/*
 * Send Queue Completion Handler - potentially called in interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct ib_wc wc_a[6];
	struct ib_wc *wc;
	struct ib_cq *cq = xprt->sc_sq_cq;
	int ret;

	memset(wc_a, 0, sizeof(wc_a));

	if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_sq_poll);
	while ((ret = ib_poll_cq(cq, ARRAY_SIZE(wc_a), wc_a)) > 0) {
		int i;

		for (i = 0; i < ret; i++) {
			wc = &wc_a[i];
			if (wc->status != IB_WC_SUCCESS) {
				dprintk("svcrdma: sq wc err status %d\n",
					wc->status);

				/* Close the transport */
				set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			}

			/* Decrement used SQ WR count */
			atomic_dec(&xprt->sc_sq_count);
			wake_up(&xprt->sc_send_wait);

			ctxt = (struct svc_rdma_op_ctxt *)
				(unsigned long)wc->wr_id;
			if (ctxt)
				process_context(xprt, ctxt);

			svc_xprt_put(&xprt->sc_xprt);
		}
	}

	if (ctxt)
		atomic_inc(&rdma_stat_sq_prod);
}
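
/*
 * Like rq_comp_handler, this runs in interrupt context: it only marks
 * the transport as having SQ work pending and lets the DTO tasklet do
 * the actual CQ reaping.
 */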
static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an RQ
	 * completion.
	 */
	set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}
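
/*
 * Allocate and initialize the svcxprt_rdma structure shared by
 * listening and connected endpoints.
 */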
static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
					     int listener)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt)
		return NULL;
	svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
	spin_lock_init(&cma_xprt->sc_frmr_q_lock);

	cma_xprt->sc_ord = svcrdma_ord;

	cma_xprt->sc_max_req_size = svcrdma_max_req_size;
	cma_xprt->sc_max_requests = svcrdma_max_requests;
	cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
	atomic_set(&cma_xprt->sc_sq_count, 0);
	atomic_set(&cma_xprt->sc_ctxt_used, 0);

	if (listener)
		set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}
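
/*
 * Allocate a page for a receive buffer, retrying once a second until
 * one becomes available.
 */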
struct page *svc_rdma_get_page(void)
{
	struct page *page;

	while ((page = alloc_page(GFP_KERNEL)) == NULL) {
		/* If we can't get memory, wait a bit and try again */
		printk(KERN_INFO "svcrdma: out of memory...retrying in 1s\n");
		schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
	}
	return page;
}
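
/*
 * Build and post a receive WR with enough page-sized SGEs to cover
 * the maximum inline request size for this transport.
 */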
int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct svc_rdma_op_ctxt *ctxt;
	struct page *page;
	dma_addr_t pa;
	int sge_no;
	int buflen;
	int ret;

	ctxt = svc_rdma_get_context(xprt);
	buflen = 0;
	ctxt->direction = DMA_FROM_DEVICE;
	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
		if (sge_no >= xprt->sc_max_sge) {
			pr_err("svcrdma: Too many sges (%d)\n", sge_no);
			goto err_put_ctxt;
		}
		page = svc_rdma_get_page();
		ctxt->pages[sge_no] = page;
		pa = ib_dma_map_page(xprt->sc_cm_id->device,
				     page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
			goto err_put_ctxt;
		atomic_inc(&xprt->sc_dma_used);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
		ctxt->count = sge_no + 1;
		buflen += PAGE_SIZE;
	}
	recv_wr.next = NULL;
	recv_wr.sg_list = &ctxt->sge[0];
	recv_wr.num_sge = ctxt->count;
	recv_wr.wr_id = (u64)(unsigned long)ctxt;

	svc_xprt_get(&xprt->sc_xprt);
	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
	if (ret) {
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
		svc_xprt_put(&xprt->sc_xprt);
	}
	return ret;

 err_put_ctxt:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return -ENOMEM;
}
/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listen xprt. When the listen thread is kicked, it
 * will call the recvfrom method on the listen xprt which will accept the new
 * connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
	if (!newxprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return;
	}
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
		newxprt, newxprt->sc_cm_id, listen_xprt);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = client_ird;

	/* Set the local and remote addresses in the transport */
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}
/*
 * Handles events generated on the listening endpoint. These events
 * will either be incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct svcxprt_rdma *xprt = cma_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, cma_id->context, event->event);
		handle_connect_req(cma_id,
				   event->param.conn.initiator_depth);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt)
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		break;

	default:
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}

	return ret;
}
static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct svc_xprt *xprt = cma_id->context;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, xprt, event->event);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
		}
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}
	return 0;
}
/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA socket\n");
	if (sa->sa_family != AF_INET) {
		dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
		return ERR_PTR(-EAFNOSUPPORT);
	}
	cma_xprt = rdma_create_xprt(serv, 1);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);

	listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP,
				   IB_QPT_RC);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}
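
/*
 * Allocate a fast-register MR and its page list, sized for the
 * largest possible RPC (RPCSVC_MAXPAGES).
 */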
static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
{
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *pl;
	struct svc_rdma_fastreg_mr *frmr;

	frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
	if (!frmr)
		goto err;

	mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
	if (IS_ERR(mr))
		goto err_free_frmr;

	pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
					 RPCSVC_MAXPAGES);
	if (IS_ERR(pl))
		goto err_free_mr;

	frmr->mr = mr;
	frmr->page_list = pl;
	INIT_LIST_HEAD(&frmr->frmr_list);
	return frmr;

 err_free_mr:
	ib_dereg_mr(mr);
 err_free_frmr:
	kfree(frmr);
 err:
	return ERR_PTR(-ENOMEM);
}
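
/*
 * Release every fast-register MR on the transport's free queue.
 * Called during transport teardown.
 */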
static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_fastreg_mr *frmr;

	while (!list_empty(&xprt->sc_frmr_q)) {
		frmr = list_entry(xprt->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		ib_dereg_mr(frmr->mr);
		ib_free_fast_reg_page_list(frmr->page_list);
		kfree(frmr);
	}
}
struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_fastreg_mr *frmr = NULL;

	spin_lock_bh(&rdma->sc_frmr_q_lock);
	if (!list_empty(&rdma->sc_frmr_q)) {
		frmr = list_entry(rdma->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		frmr->map_len = 0;
		frmr->page_list_len = 0;
	}
	spin_unlock_bh(&rdma->sc_frmr_q_lock);
	if (frmr)
		return frmr;

	return rdma_alloc_frmr(rdma);
}
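
/*
 * Unmap the pages currently registered under an FRMR.
 */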
static void frmr_unmap_dma(struct svcxprt_rdma *xprt,
			   struct svc_rdma_fastreg_mr *frmr)
{
	int page_no;

	for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
		dma_addr_t addr = frmr->page_list->page_list[page_no];

		if (ib_dma_mapping_error(frmr->mr->device, addr))
			continue;
		atomic_dec(&xprt->sc_dma_used);
		ib_dma_unmap_page(frmr->mr->device, addr, PAGE_SIZE,
				  frmr->direction);
	}
}
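
/*
 * Unmap an FRMR's pages and return it to the transport's free queue.
 */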
void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
		       struct svc_rdma_fastreg_mr *frmr)
{
	if (frmr) {
		frmr_unmap_dma(rdma, frmr);
		spin_lock_bh(&rdma->sc_frmr_q_lock);
		WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
		spin_unlock_bh(&rdma->sc_frmr_q_lock);
	}
}
/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_device_attr devattr;
	int uninitialized_var(dma_mr_acc);
	int need_dma_mr;
	int ret;
	int i;

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock_bh(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock_bh(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);
	ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
	if (ret) {
		dprintk("svcrdma: could not query device attributes on "
			"device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
		goto errout;
	}
	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device */
	newxprt->sc_max_sge = min((size_t)devattr.max_sge,
				  (size_t)RPCSVC_MAXPAGES);
	newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
				       (size_t)svcrdma_max_requests);
	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

	/*
	 * Limit ORD based on client limit, local device limit, and
	 * configured svcrdma limit.
	 */
	newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
	newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);
	newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 sq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_sq_depth,
					 0);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 rq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_max_requests,
					 0);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
	qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
		"    cm_id->device=%p, sc_pd->device=%p\n"
		"    cap.max_send_wr = %d\n"
		"    cap.max_recv_wr = %d\n"
		"    cap.max_send_sge = %d\n"
		"    cap.max_recv_sge = %d\n",
		newxprt->sc_cm_id, newxprt->sc_pd,
		newxprt->sc_cm_id->device, newxprt->sc_pd->device,
		qp_attr.cap.max_send_wr,
		qp_attr.cap.max_recv_wr,
		qp_attr.cap.max_send_sge,
		qp_attr.cap.max_recv_sge);
	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
		goto errout;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;
	/*
	 * Use the most secure set of MR resources based on the
	 * transport type and available memory management features in
	 * the device. Here's the table implemented below:
	 *
	 *		Fast	Global	DMA	Remote WR
	 *		Reg	LKEY	MR	Access
	 *		Sup'd	Sup'd	Needed	Needed
	 *
	 * IWARP	N	N	Y	Y
	 *		N	Y	Y	Y
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * IB		N	N	Y	N
	 *		N	Y	N	-
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * NB:	iWARP requires remote write access for the data sink
	 *	of an RDMA_READ. IB does not.
	 */
	if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		newxprt->sc_frmr_pg_list_len =
			devattr.max_fast_reg_page_list_len;
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
	}
	/*
	 * Determine if a DMA MR is required and if so, what privs are required
	 */
	switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IWARP:
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
		if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
			need_dma_mr = 1;
			dma_mr_acc =
				(IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE);
		} else if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
			need_dma_mr = 1;
			dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		} else
			need_dma_mr = 0;
		break;
	case RDMA_TRANSPORT_IB:
		if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
			need_dma_mr = 1;
			dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		} else if (!(devattr.device_cap_flags &
			     IB_DEVICE_LOCAL_DMA_LKEY)) {
			need_dma_mr = 1;
			dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		} else
			need_dma_mr = 0;
		break;
	default:
		need_dma_mr = 0;
		break;
	}
	/* Create the DMA MR if needed, otherwise, use the DMA LKEY */
	if (need_dma_mr) {
		/* Register all of physical memory */
		newxprt->sc_phys_mr =
			ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
		if (IS_ERR(newxprt->sc_phys_mr)) {
			dprintk("svcrdma: Failed to create DMA MR ret=%d\n",
				ret);
			goto errout;
		}
		newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
	} else
		newxprt->sc_dma_lkey =
			newxprt->sc_cm_id->device->local_dma_lkey;
	/* Post receive buffers */
	for (i = 0; i < newxprt->sc_max_requests; i++) {
		ret = svc_rdma_post_recv(newxprt);
		if (ret) {
			dprintk("svcrdma: failure posting receive buffers\n");
			goto errout;
		}
	}
	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/*
	 * Arm the CQs for the SQ and RQ before accepting so we can't
	 * miss the first message
	 */
	ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = newxprt->sc_ord;
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret) {
		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
			ret);
		goto errout;
	}
1061 dprintk("svcrdma: new connection %p accepted with the following "
1063 " local_ip : %pI4\n"
1064 " local_port : %d\n"
1065 " remote_ip : %pI4\n"
1066 " remote_port : %d\n"
1069 " max_requests : %d\n"
1072 &((struct sockaddr_in
*)&newxprt
->sc_cm_id
->
1073 route
.addr
.src_addr
)->sin_addr
.s_addr
,
1074 ntohs(((struct sockaddr_in
*)&newxprt
->sc_cm_id
->
1075 route
.addr
.src_addr
)->sin_port
),
1076 &((struct sockaddr_in
*)&newxprt
->sc_cm_id
->
1077 route
.addr
.dst_addr
)->sin_addr
.s_addr
,
1078 ntohs(((struct sockaddr_in
*)&newxprt
->sc_cm_id
->
1079 route
.addr
.dst_addr
)->sin_port
),
1080 newxprt
->sc_max_sge
,
1081 newxprt
->sc_sq_depth
,
1082 newxprt
->sc_max_requests
,
1085 return &newxprt
->sc_xprt
;
1088 dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret
);
1089 /* Take a reference in case the DTO handler runs */
1090 svc_xprt_get(&newxprt
->sc_xprt
);
1091 if (newxprt
->sc_qp
&& !IS_ERR(newxprt
->sc_qp
))
1092 ib_destroy_qp(newxprt
->sc_qp
);
1093 rdma_destroy_id(newxprt
->sc_cm_id
);
1094 /* This call to put will destroy the transport */
1095 svc_xprt_put(&newxprt
->sc_xprt
);
static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	dprintk("svc: svc_rdma_detach(%p)\n", xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}
static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);
	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);

	/* We should only be called from kref_put */
	if (atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0)
		pr_err("svcrdma: sc_xprt still in use? (%d)\n",
		       atomic_read(&rdma->sc_xprt.xpt_ref.refcount));

	/*
	 * Destroy queued, but not processed read completions. Note
	 * that this cleanup has to be done before destroying the
	 * cm_id because the device ptr is needed to unmap the dma in
	 * svc_rdma_put_context.
	 */
	while (!list_empty(&rdma->sc_read_complete_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Destroy queued, but not processed recv completions */
	while (!list_empty(&rdma->sc_rq_dto_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Warn if we leaked a resource or under-referenced */
	if (atomic_read(&rdma->sc_ctxt_used) != 0)
		pr_err("svcrdma: ctxt still in use? (%d)\n",
		       atomic_read(&rdma->sc_ctxt_used));
	if (atomic_read(&rdma->sc_dma_used) != 0)
		pr_err("svcrdma: dma still in use? (%d)\n",
		       atomic_read(&rdma->sc_dma_used));

	/* De-allocate fastreg mr */
	rdma_dealloc_frmr_q(rdma);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_destroy_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_destroy_cq(rdma->sc_rq_cq);

	if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
		ib_dereg_mr(rdma->sc_phys_mr);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}
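
/*
 * Destruction is deferred to a workqueue so that the blocking verbs
 * teardown in __svc_rdma_free never runs in the context that dropped
 * the last transport reference.
 */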
static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	queue_work(svc_rdma_wq, &rdma->sc_work);
}
static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are already waiters on the SQ,
	 * return 0.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}
static int svc_rdma_secure_port(struct svc_rqst *rqstp)
{
	return 1;
}
/*
 * Attempt to register the kvec representing the RPC memory with the
 * device.
 *
 * Returns:
 *	0    : The kvec register request was successfully posted.
 *	<0   : An error was encountered attempting to register the kvec.
 */
int svc_rdma_fastreg(struct svcxprt_rdma *xprt,
		     struct svc_rdma_fastreg_mr *frmr)
{
	struct ib_send_wr fastreg_wr;
	u8 key;

	/* Bump the key */
	key = (u8)(frmr->mr->lkey & 0x000000FF);
	ib_update_fast_reg_key(frmr->mr, ++key);

	/* Prepare FASTREG WR */
	memset(&fastreg_wr, 0, sizeof fastreg_wr);
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.send_flags = IB_SEND_SIGNALED;
	fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva;
	fastreg_wr.wr.fast_reg.page_list = frmr->page_list;
	fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len;
	fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fastreg_wr.wr.fast_reg.length = frmr->map_len;
	fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags;
	fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey;
	return svc_rdma_send(xprt, &fastreg_wr);
}
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr, *n_wr;
	int wr_count;
	int i;
	int ret;

	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
		return -ENOTCONN;

	wr_count = 1;
	for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
		wr_count++;

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		spin_lock_bh(&xprt->sc_lock);
		if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
			spin_unlock_bh(&xprt->sc_lock);
			atomic_inc(&rdma_stat_sq_starve);

			/* See if we can opportunistically reap SQ WR to make room */
			sq_cq_reap(xprt);

			/* Wait until SQ WR available if SQ still full */
			wait_event(xprt->sc_send_wait,
				   atomic_read(&xprt->sc_sq_count) <
				   xprt->sc_sq_depth);
			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
				return -ENOTCONN;
			continue;
		}
		/* Take a transport ref for each WR posted */
		for (i = 0; i < wr_count; i++)
			svc_xprt_get(&xprt->sc_xprt);

		/* Bump used SQ WR count and post */
		atomic_add(wr_count, &xprt->sc_sq_count);
		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
		if (ret) {
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			atomic_sub(wr_count, &xprt->sc_sq_count);
			for (i = 0; i < wr_count; i++)
				svc_xprt_put(&xprt->sc_xprt);
			dprintk("svcrdma: failed to post SQ WR rc=%d, "
				"sc_sq_count=%d, sc_sq_depth=%d\n",
				ret, atomic_read(&xprt->sc_sq_count),
				xprt->sc_sq_depth);
		}
		spin_unlock_bh(&xprt->sc_lock);
		if (ret)
			wake_up(&xprt->sc_send_wait);
		break;
	}
	return ret;
}
void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
			 enum rpcrdma_errcode err)
{
	struct ib_send_wr err_wr;
	struct page *p;
	struct svc_rdma_op_ctxt *ctxt;
	u32 *va;
	int length;
	int ret;

	p = svc_rdma_get_page();
	va = page_address(p);

	/* XDR encode error */
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_FROM_DEVICE;
	ctxt->count = 1;
	ctxt->pages[0] = p;

	/* Prepare SGE for local address */
	ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
					    p, 0, length, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
		put_page(p);
		svc_rdma_put_context(ctxt, 1);
		return;
	}
	atomic_inc(&xprt->sc_dma_used);
	ctxt->sge[0].lkey = xprt->sc_dma_lkey;
	ctxt->sge[0].length = length;

	/* Prepare SEND WR */
	memset(&err_wr, 0, sizeof err_wr);
	ctxt->wr_op = IB_WR_SEND;
	err_wr.wr_id = (unsigned long)ctxt;
	err_wr.sg_list = ctxt->sge;
	err_wr.num_sge = 1;
	err_wr.opcode = IB_WR_SEND;
	err_wr.send_flags = IB_SEND_SIGNALED;

	/* Post It */
	ret = svc_rdma_send(xprt, &err_wr);
	if (ret) {
		dprintk("svcrdma: Error %d posting send for protocol error\n",
			ret);
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
	}
}