nfs: close NFSv4 COMMIT vs. CLOSE race
net/sunrpc/xprtrdma/svc_rdma_transport.c
1/*
2 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the BSD-type
8 * license below:
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 *
14 * Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 *
17 * Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials provided
20 * with the distribution.
21 *
22 * Neither the name of the Network Appliance, Inc. nor the names of
23 * its contributors may be used to endorse or promote products
24 * derived from this software without specific prior written
25 * permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 *
39 * Author: Tom Tucker <tom@opengridcomputing.com>
40 */
41
42#include <linux/sunrpc/svc_xprt.h>
43#include <linux/sunrpc/debug.h>
44#include <linux/sunrpc/rpc_rdma.h>
d43c36dc 45#include <linux/sched.h>
5a0e3ad6 46#include <linux/slab.h>
377f9b2f 47#include <linux/spinlock.h>
a25e758c 48#include <linux/workqueue.h>
49#include <rdma/ib_verbs.h>
50#include <rdma/rdma_cm.h>
51#include <linux/sunrpc/svc_rdma.h>
52
53#define RPCDBG_FACILITY RPCDBG_SVCXPRT
54
55static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
62832c03 56 struct net *net,
57 struct sockaddr *sa, int salen,
58 int flags);
59static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
60static void svc_rdma_release_rqst(struct svc_rqst *);
61static void dto_tasklet_func(unsigned long data);
62static void svc_rdma_detach(struct svc_xprt *xprt);
63static void svc_rdma_free(struct svc_xprt *xprt);
64static int svc_rdma_has_wspace(struct svc_xprt *xprt);
65static void rq_cq_reap(struct svcxprt_rdma *xprt);
66static void sq_cq_reap(struct svcxprt_rdma *xprt);
67
5eaa65b2 68static DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
69static DEFINE_SPINLOCK(dto_lock);
70static LIST_HEAD(dto_xprt_q);
71
72static struct svc_xprt_ops svc_rdma_ops = {
73 .xpo_create = svc_rdma_create,
74 .xpo_recvfrom = svc_rdma_recvfrom,
75 .xpo_sendto = svc_rdma_sendto,
76 .xpo_release_rqst = svc_rdma_release_rqst,
77 .xpo_detach = svc_rdma_detach,
78 .xpo_free = svc_rdma_free,
79 .xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
80 .xpo_has_wspace = svc_rdma_has_wspace,
81 .xpo_accept = svc_rdma_accept,
82};
83
84struct svc_xprt_class svc_rdma_class = {
85 .xcl_name = "rdma",
86 .xcl_owner = THIS_MODULE,
87 .xcl_ops = &svc_rdma_ops,
88 .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
89};
90
91/* WR context cache. Created in svc_rdma.c */
92extern struct kmem_cache *svc_rdma_ctxt_cachep;
377f9b2f 93
94/* Workqueue created in svc_rdma.c */
95extern struct workqueue_struct *svc_rdma_wq;
96
97struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
98{
99 struct svc_rdma_op_ctxt *ctxt;
100
101 while (1) {
102 ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
103 if (ctxt)
104 break;
105 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
377f9b2f 106 }
107 ctxt->xprt = xprt;
108 INIT_LIST_HEAD(&ctxt->dto_q);
109 ctxt->count = 0;
64be8608 110 ctxt->frmr = NULL;
8948896c 111 atomic_inc(&xprt->sc_ctxt_used);
112 return ctxt;
113}
114
146b6df6 115void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
116{
117 struct svcxprt_rdma *xprt = ctxt->xprt;
118 int i;
119 for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
120 /*
121 * Unmap the DMA addr in the SGE if the lkey matches
122 * the sc_dma_lkey, otherwise, ignore it since it is
123 * an FRMR lkey and will be unmapped later when the
124 * last WR that uses it completes.
125 */
126 if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
127 atomic_dec(&xprt->sc_dma_used);
b432e6b3 128 ib_dma_unmap_page(xprt->sc_cm_id->device,
129 ctxt->sge[i].addr,
130 ctxt->sge[i].length,
131 ctxt->direction);
132 }
133 }
134}
135
136void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
137{
138 struct svcxprt_rdma *xprt;
139 int i;
140
141 BUG_ON(!ctxt);
142 xprt = ctxt->xprt;
143 if (free_pages)
144 for (i = 0; i < ctxt->count; i++)
145 put_page(ctxt->pages[i]);
146
8948896c 147 kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
87407673 148 atomic_dec(&xprt->sc_ctxt_used);
149}
150
151/* Temporary NFS request map cache. Created in svc_rdma.c */
152extern struct kmem_cache *svc_rdma_map_cachep;
153
154/*
155 * Temporary NFS req mappings are shared across all transport
156 * instances. These are short lived and should be bounded by the number
157 * of concurrent server threads * depth of the SQ.
158 */
159struct svc_rdma_req_map *svc_rdma_get_req_map(void)
160{
161 struct svc_rdma_req_map *map;
162 while (1) {
163 map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
164 if (map)
165 break;
166 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
167 }
168 map->count = 0;
64be8608 169 map->frmr = NULL;
170 return map;
171}
172
173void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
174{
175 kmem_cache_free(svc_rdma_map_cachep, map);
176}
177
178/* ib_cq event handler */
179static void cq_event_handler(struct ib_event *event, void *context)
180{
181 struct svc_xprt *xprt = context;
182 dprintk("svcrdma: received CQ event id=%d, context=%p\n",
183 event->event, context);
184 set_bit(XPT_CLOSE, &xprt->xpt_flags);
185}
186
187/* QP event handler */
188static void qp_event_handler(struct ib_event *event, void *context)
189{
190 struct svc_xprt *xprt = context;
191
192 switch (event->event) {
193 /* These are considered benign events */
194 case IB_EVENT_PATH_MIG:
195 case IB_EVENT_COMM_EST:
196 case IB_EVENT_SQ_DRAINED:
197 case IB_EVENT_QP_LAST_WQE_REACHED:
198 dprintk("svcrdma: QP event %d received for QP=%p\n",
199 event->event, event->element.qp);
200 break;
201 /* These are considered fatal events */
202 case IB_EVENT_PATH_MIG_ERR:
203 case IB_EVENT_QP_FATAL:
204 case IB_EVENT_QP_REQ_ERR:
205 case IB_EVENT_QP_ACCESS_ERR:
206 case IB_EVENT_DEVICE_FATAL:
207 default:
208 dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
209 "closing transport\n",
210 event->event, event->element.qp);
211 set_bit(XPT_CLOSE, &xprt->xpt_flags);
212 break;
213 }
214}
215
216/*
217 * Data Transfer Operation Tasklet
218 *
219 * Walks a list of transports with I/O pending, removing entries as
220 * they are added to the server's I/O pending list. Two bits indicate
221 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
222 * spinlock that serializes access to the transport list with the RQ
223 * and SQ interrupt handlers.
224 */
225static void dto_tasklet_func(unsigned long data)
226{
227 struct svcxprt_rdma *xprt;
228 unsigned long flags;
229
230 spin_lock_irqsave(&dto_lock, flags);
231 while (!list_empty(&dto_xprt_q)) {
232 xprt = list_entry(dto_xprt_q.next,
233 struct svcxprt_rdma, sc_dto_q);
234 list_del_init(&xprt->sc_dto_q);
235 spin_unlock_irqrestore(&dto_lock, flags);
236
237 rq_cq_reap(xprt);
238 sq_cq_reap(xprt);
377f9b2f 239
c48cbb40 240 svc_xprt_put(&xprt->sc_xprt);
241 spin_lock_irqsave(&dto_lock, flags);
242 }
243 spin_unlock_irqrestore(&dto_lock, flags);
244}
245
246/*
247 * Receive Queue Completion Handler
248 *
 249 * Since an RQ completion handler is called in interrupt context, we
250 * need to defer the handling of the I/O to a tasklet
251 */
252static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
253{
254 struct svcxprt_rdma *xprt = cq_context;
255 unsigned long flags;
256
257 /* Guard against unconditional flush call for destroyed QP */
 258 if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
259 return;
260
261 /*
262 * Set the bit regardless of whether or not it's on the list
263 * because it may be on the list already due to an SQ
264 * completion.
1711386c 265 */
266 set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);
267
268 /*
269 * If this transport is not already on the DTO transport queue,
270 * add it
271 */
272 spin_lock_irqsave(&dto_lock, flags);
273 if (list_empty(&xprt->sc_dto_q)) {
274 svc_xprt_get(&xprt->sc_xprt);
377f9b2f 275 list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
c48cbb40 276 }
277 spin_unlock_irqrestore(&dto_lock, flags);
278
279 /* Tasklet does all the work to avoid irqsave locks. */
280 tasklet_schedule(&dto_tasklet);
281}
282
283/*
284 * rq_cq_reap - Process the RQ CQ.
285 *
286 * Take all completing WC off the CQE and enqueue the associated DTO
287 * context on the dto_q for the transport.
288 *
289 * Note that caller must hold a transport reference.
290 */
291static void rq_cq_reap(struct svcxprt_rdma *xprt)
292{
293 int ret;
294 struct ib_wc wc;
295 struct svc_rdma_op_ctxt *ctxt = NULL;
296
297 if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
298 return;
299
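	/* Re-arm the CQ before draining it; this closes the race where a
	 * completion arrives between the final poll and the re-arm. */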
300 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
301 atomic_inc(&rdma_stat_rq_poll);
302
303 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
304 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
305 ctxt->wc_status = wc.status;
306 ctxt->byte_len = wc.byte_len;
e6ab9143 307 svc_rdma_unmap_dma(ctxt);
308 if (wc.status != IB_WC_SUCCESS) {
309 /* Close the transport */
0905c0f0 310 dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
311 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
312 svc_rdma_put_context(ctxt, 1);
0905c0f0 313 svc_xprt_put(&xprt->sc_xprt);
314 continue;
315 }
47698e08 316 spin_lock_bh(&xprt->sc_rq_dto_lock);
377f9b2f 317 list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
47698e08 318 spin_unlock_bh(&xprt->sc_rq_dto_lock);
0905c0f0 319 svc_xprt_put(&xprt->sc_xprt);
377f9b2f 320 }
321
322 if (ctxt)
323 atomic_inc(&rdma_stat_rq_prod);
324
325 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
326 /*
327 * If data arrived before established event,
328 * don't enqueue. This defers RPC I/O until the
329 * RDMA connection is complete.
330 */
331 if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
332 svc_xprt_enqueue(&xprt->sc_xprt);
333}
334
335/*
 336 * Process a completion context
337 */
338static void process_context(struct svcxprt_rdma *xprt,
339 struct svc_rdma_op_ctxt *ctxt)
340{
341 svc_rdma_unmap_dma(ctxt);
342
343 switch (ctxt->wr_op) {
344 case IB_WR_SEND:
345 if (test_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags))
346 svc_rdma_put_frmr(xprt, ctxt->frmr);
347 svc_rdma_put_context(ctxt, 1);
348 break;
349
350 case IB_WR_RDMA_WRITE:
351 svc_rdma_put_context(ctxt, 0);
352 break;
353
354 case IB_WR_RDMA_READ:
146b6df6 355 case IB_WR_RDMA_READ_WITH_INV:
356 if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
357 struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
358 BUG_ON(!read_hdr);
359 if (test_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags))
360 svc_rdma_put_frmr(xprt, ctxt->frmr);
361 spin_lock_bh(&xprt->sc_rq_dto_lock);
362 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
363 list_add_tail(&read_hdr->dto_q,
364 &xprt->sc_read_complete_q);
365 spin_unlock_bh(&xprt->sc_rq_dto_lock);
366 svc_xprt_enqueue(&xprt->sc_xprt);
367 }
368 svc_rdma_put_context(ctxt, 0);
369 break;
370
371 default:
372 printk(KERN_ERR "svcrdma: unexpected completion type, "
373 "opcode=%d\n",
374 ctxt->wr_op);
375 break;
376 }
377}
378
379/*
 380 * Send Queue Completion Handler - potentially called in interrupt context.
381 *
382 * Note that caller must hold a transport reference.
383 */
384static void sq_cq_reap(struct svcxprt_rdma *xprt)
385{
386 struct svc_rdma_op_ctxt *ctxt = NULL;
387 struct ib_wc wc;
388 struct ib_cq *cq = xprt->sc_sq_cq;
389 int ret;
390
391 if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
392 return;
393
394 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
395 atomic_inc(&rdma_stat_sq_poll);
396 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
397 if (wc.status != IB_WC_SUCCESS)
398 /* Close the transport */
399 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
400
401 /* Decrement used SQ WR count */
402 atomic_dec(&xprt->sc_sq_count);
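	/* Wake any thread blocked in svc_rdma_send() waiting for SQ space */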
403 wake_up(&xprt->sc_send_wait);
404
405 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
406 if (ctxt)
407 process_context(xprt, ctxt);
377f9b2f 408
0905c0f0 409 svc_xprt_put(&xprt->sc_xprt);
410 }
411
412 if (ctxt)
413 atomic_inc(&rdma_stat_sq_prod);
414}
415
416static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
417{
418 struct svcxprt_rdma *xprt = cq_context;
419 unsigned long flags;
420
421 /* Guard against unconditional flush call for destroyed QP */
 422 if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
423 return;
424
425 /*
426 * Set the bit regardless of whether or not it's on the list
427 * because it may be on the list already due to an RQ
428 * completion.
1711386c 429 */
430 set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);
431
432 /*
433 * If this transport is not already on the DTO transport queue,
434 * add it
435 */
436 spin_lock_irqsave(&dto_lock, flags);
437 if (list_empty(&xprt->sc_dto_q)) {
438 svc_xprt_get(&xprt->sc_xprt);
377f9b2f 439 list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
c48cbb40 440 }
441 spin_unlock_irqrestore(&dto_lock, flags);
442
443 /* Tasklet does all the work to avoid irqsave locks. */
444 tasklet_schedule(&dto_tasklet);
445}
446
447static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
448 int listener)
449{
450 struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);
451
452 if (!cma_xprt)
453 return NULL;
454 svc_xprt_init(&svc_rdma_class, &cma_xprt->sc_xprt, serv);
455 INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
456 INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
457 INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
458 INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
64be8608 459 INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
460 init_waitqueue_head(&cma_xprt->sc_send_wait);
461
462 spin_lock_init(&cma_xprt->sc_lock);
377f9b2f 463 spin_lock_init(&cma_xprt->sc_rq_dto_lock);
64be8608 464 spin_lock_init(&cma_xprt->sc_frmr_q_lock);
465
466 cma_xprt->sc_ord = svcrdma_ord;
467
468 cma_xprt->sc_max_req_size = svcrdma_max_req_size;
469 cma_xprt->sc_max_requests = svcrdma_max_requests;
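	/* A single RPC may consume several SQ WRs (a send plus RDMA read and
	 * write WRs), so the SQ is sized as a multiple of the request limit. */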
470 cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
471 atomic_set(&cma_xprt->sc_sq_count, 0);
87295b6c 472 atomic_set(&cma_xprt->sc_ctxt_used, 0);
377f9b2f 473
8948896c 474 if (listener)
475 set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
476
477 return cma_xprt;
478}
479
480struct page *svc_rdma_get_page(void)
481{
482 struct page *page;
483
484 while ((page = alloc_page(GFP_KERNEL)) == NULL) {
485 /* If we can't get memory, wait a bit and try again */
 486 printk(KERN_INFO "svcrdma: out of memory...retrying in 1000 "
 487 "ms.\n");
488 schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
489 }
490 return page;
491}
492
493int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
494{
495 struct ib_recv_wr recv_wr, *bad_recv_wr;
496 struct svc_rdma_op_ctxt *ctxt;
497 struct page *page;
a5abf4e8 498 dma_addr_t pa;
499 int sge_no;
500 int buflen;
501 int ret;
502
503 ctxt = svc_rdma_get_context(xprt);
504 buflen = 0;
505 ctxt->direction = DMA_FROM_DEVICE;
506 for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
507 BUG_ON(sge_no >= xprt->sc_max_sge);
508 page = svc_rdma_get_page();
509 ctxt->pages[sge_no] = page;
510 pa = ib_dma_map_page(xprt->sc_cm_id->device,
511 page, 0, PAGE_SIZE,
377f9b2f 512 DMA_FROM_DEVICE);
513 if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
514 goto err_put_ctxt;
515 atomic_inc(&xprt->sc_dma_used);
516 ctxt->sge[sge_no].addr = pa;
517 ctxt->sge[sge_no].length = PAGE_SIZE;
a5abf4e8 518 ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
4a84386f 519 ctxt->count = sge_no + 1;
520 buflen += PAGE_SIZE;
521 }
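	/* The receive WR ID carries the op context pointer; rq_cq_reap()
	 * casts it back when this receive completes. */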
522 recv_wr.next = NULL;
523 recv_wr.sg_list = &ctxt->sge[0];
524 recv_wr.num_sge = ctxt->count;
525 recv_wr.wr_id = (u64)(unsigned long)ctxt;
526
0905c0f0 527 svc_xprt_get(&xprt->sc_xprt);
377f9b2f 528 ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
0905c0f0 529 if (ret) {
21515e46 530 svc_rdma_unmap_dma(ctxt);
05a0826a 531 svc_rdma_put_context(ctxt, 1);
21515e46 532 svc_xprt_put(&xprt->sc_xprt);
0905c0f0 533 }
377f9b2f 534 return ret;
535
536 err_put_ctxt:
4a84386f 537 svc_rdma_unmap_dma(ctxt);
538 svc_rdma_put_context(ctxt, 1);
539 return -ENOMEM;
540}
541
542/*
543 * This function handles the CONNECT_REQUEST event on a listening
544 * endpoint. It is passed the cma_id for the _new_ connection. The context in
545 * this cma_id is inherited from the listening cma_id and is the svc_xprt
546 * structure for the listening endpoint.
547 *
548 * This function creates a new xprt for the new connection and enqueues it on
 549 * the accept queue for the listening xprt. When the listen thread is kicked, it
550 * will call the recvfrom method on the listen xprt which will accept the new
551 * connection.
552 */
36ef25e4 553static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
554{
555 struct svcxprt_rdma *listen_xprt = new_cma_id->context;
556 struct svcxprt_rdma *newxprt;
af261af4 557 struct sockaddr *sa;
558
559 /* Create a new transport */
560 newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
561 if (!newxprt) {
562 dprintk("svcrdma: failed to create new transport\n");
563 return;
564 }
565 newxprt->sc_cm_id = new_cma_id;
566 new_cma_id->context = newxprt;
567 dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
568 newxprt, newxprt->sc_cm_id, listen_xprt);
569
570 /* Save client advertised inbound read limit for use later in accept. */
571 newxprt->sc_ord = client_ird;
572
573 /* Set the local and remote addresses in the transport */
574 sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
575 svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
576 sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
577 svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));
578
579 /*
580 * Enqueue the new transport on the accept queue of the listening
581 * transport
582 */
583 spin_lock_bh(&listen_xprt->sc_lock);
584 list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
585 spin_unlock_bh(&listen_xprt->sc_lock);
586
587 /*
588 * Can't use svc_xprt_received here because we are not on a
589 * rqstp thread
590 */
591 set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
592 svc_xprt_enqueue(&listen_xprt->sc_xprt);
593}
594
595/*
 596 * Handles events generated on the listening endpoint. These events will
 597 * either be incoming connect requests or adapter removal events.
598 */
599static int rdma_listen_handler(struct rdma_cm_id *cma_id,
600 struct rdma_cm_event *event)
601{
602 struct svcxprt_rdma *xprt = cma_id->context;
603 int ret = 0;
604
605 switch (event->event) {
606 case RDMA_CM_EVENT_CONNECT_REQUEST:
607 dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
608 "event=%d\n", cma_id, cma_id->context, event->event);
36ef25e4 609 handle_connect_req(cma_id,
67080c82 610 event->param.conn.initiator_depth);
611 break;
612
613 case RDMA_CM_EVENT_ESTABLISHED:
614 /* Accept complete */
615 dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
616 "cm_id=%p\n", xprt, cma_id);
617 break;
618
619 case RDMA_CM_EVENT_DEVICE_REMOVAL:
620 dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
621 xprt, cma_id);
622 if (xprt)
623 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
624 break;
625
626 default:
627 dprintk("svcrdma: Unexpected event on listening endpoint %p, "
628 "event=%d\n", cma_id, event->event);
629 break;
630 }
631
632 return ret;
633}
634
635static int rdma_cma_handler(struct rdma_cm_id *cma_id,
636 struct rdma_cm_event *event)
637{
638 struct svc_xprt *xprt = cma_id->context;
639 struct svcxprt_rdma *rdma =
640 container_of(xprt, struct svcxprt_rdma, sc_xprt);
641 switch (event->event) {
642 case RDMA_CM_EVENT_ESTABLISHED:
643 /* Accept complete */
c48cbb40 644 svc_xprt_get(xprt);
645 dprintk("svcrdma: Connection completed on DTO xprt=%p, "
646 "cm_id=%p\n", xprt, cma_id);
647 clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
648 svc_xprt_enqueue(xprt);
649 break;
650 case RDMA_CM_EVENT_DISCONNECTED:
651 dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
652 xprt, cma_id);
653 if (xprt) {
654 set_bit(XPT_CLOSE, &xprt->xpt_flags);
655 svc_xprt_enqueue(xprt);
120693d1 656 svc_xprt_put(xprt);
657 }
658 break;
659 case RDMA_CM_EVENT_DEVICE_REMOVAL:
660 dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
661 "event=%d\n", cma_id, xprt, event->event);
662 if (xprt) {
663 set_bit(XPT_CLOSE, &xprt->xpt_flags);
664 svc_xprt_enqueue(xprt);
665 }
666 break;
667 default:
668 dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
669 "event=%d\n", cma_id, event->event);
670 break;
671 }
672 return 0;
673}
674
675/*
676 * Create a listening RDMA service endpoint.
677 */
678static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
62832c03 679 struct net *net,
680 struct sockaddr *sa, int salen,
681 int flags)
682{
683 struct rdma_cm_id *listen_id;
684 struct svcxprt_rdma *cma_xprt;
685 struct svc_xprt *xprt;
686 int ret;
687
688 dprintk("svcrdma: Creating RDMA socket\n");
689 if (sa->sa_family != AF_INET) {
690 dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
691 return ERR_PTR(-EAFNOSUPPORT);
692 }
693 cma_xprt = rdma_create_xprt(serv, 1);
694 if (!cma_xprt)
58e8f621 695 return ERR_PTR(-ENOMEM);
696 xprt = &cma_xprt->sc_xprt;
697
698 listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
699 if (IS_ERR(listen_id)) {
58e8f621
TT
700 ret = PTR_ERR(listen_id);
701 dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
702 goto err0;
377f9b2f 703 }
58e8f621 704
705 ret = rdma_bind_addr(listen_id, sa);
706 if (ret) {
377f9b2f 707 dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
58e8f621 708 goto err1;
709 }
710 cma_xprt->sc_cm_id = listen_id;
711
712 ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
713 if (ret) {
377f9b2f 714 dprintk("svcrdma: rdma_listen failed = %d\n", ret);
58e8f621 715 goto err1;
716 }
717
718 /*
719 * We need to use the address from the cm_id in case the
720 * caller specified 0 for the port number.
721 */
722 sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
723 svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);
724
725 return &cma_xprt->sc_xprt;
726
727 err1:
728 rdma_destroy_id(listen_id);
729 err0:
730 kfree(cma_xprt);
731 return ERR_PTR(ret);
732}
733
734static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
735{
736 struct ib_mr *mr;
737 struct ib_fast_reg_page_list *pl;
738 struct svc_rdma_fastreg_mr *frmr;
739
740 frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
741 if (!frmr)
742 goto err;
743
744 mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
846d8e7c 745 if (IS_ERR(mr))
746 goto err_free_frmr;
747
748 pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
749 RPCSVC_MAXPAGES);
846d8e7c 750 if (IS_ERR(pl))
751 goto err_free_mr;
752
753 frmr->mr = mr;
754 frmr->page_list = pl;
755 INIT_LIST_HEAD(&frmr->frmr_list);
756 return frmr;
757
758 err_free_mr:
759 ib_dereg_mr(mr);
760 err_free_frmr:
761 kfree(frmr);
762 err:
763 return ERR_PTR(-ENOMEM);
764}
765
766static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
767{
768 struct svc_rdma_fastreg_mr *frmr;
769
770 while (!list_empty(&xprt->sc_frmr_q)) {
771 frmr = list_entry(xprt->sc_frmr_q.next,
772 struct svc_rdma_fastreg_mr, frmr_list);
773 list_del_init(&frmr->frmr_list);
774 ib_dereg_mr(frmr->mr);
775 ib_free_fast_reg_page_list(frmr->page_list);
776 kfree(frmr);
777 }
778}
779
780struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
781{
782 struct svc_rdma_fastreg_mr *frmr = NULL;
783
784 spin_lock_bh(&rdma->sc_frmr_q_lock);
785 if (!list_empty(&rdma->sc_frmr_q)) {
786 frmr = list_entry(rdma->sc_frmr_q.next,
787 struct svc_rdma_fastreg_mr, frmr_list);
788 list_del_init(&frmr->frmr_list);
789 frmr->map_len = 0;
790 frmr->page_list_len = 0;
791 }
792 spin_unlock_bh(&rdma->sc_frmr_q_lock);
793 if (frmr)
794 return frmr;
795
796 return rdma_alloc_frmr(rdma);
797}
798
799static void frmr_unmap_dma(struct svcxprt_rdma *xprt,
800 struct svc_rdma_fastreg_mr *frmr)
801{
802 int page_no;
803 for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
804 dma_addr_t addr = frmr->page_list->page_list[page_no];
805 if (ib_dma_mapping_error(frmr->mr->device, addr))
806 continue;
807 atomic_dec(&xprt->sc_dma_used);
808 ib_dma_unmap_page(frmr->mr->device, addr, PAGE_SIZE,
809 frmr->direction);
810 }
811}
812
813void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
814 struct svc_rdma_fastreg_mr *frmr)
815{
816 if (frmr) {
817 frmr_unmap_dma(rdma, frmr);
818 spin_lock_bh(&rdma->sc_frmr_q_lock);
819 BUG_ON(!list_empty(&frmr->frmr_list));
820 list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
821 spin_unlock_bh(&rdma->sc_frmr_q_lock);
822 }
823}
824
825/*
826 * This is the xpo_recvfrom function for listening endpoints. Its
827 * purpose is to accept incoming connections. The CMA callback handler
828 * has already created a new transport and attached it to the new CMA
829 * ID.
830 *
831 * There is a queue of pending connections hung on the listening
832 * transport. This queue contains the new svc_xprt structure. This
833 * function takes svc_xprt structures off the accept_q and completes
834 * the connection.
835 */
836static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
837{
838 struct svcxprt_rdma *listen_rdma;
839 struct svcxprt_rdma *newxprt = NULL;
840 struct rdma_conn_param conn_param;
841 struct ib_qp_init_attr qp_attr;
842 struct ib_device_attr devattr;
ed72b9c6 843 int uninitialized_var(dma_mr_acc);
3a5c6380 844 int need_dma_mr;
845 int ret;
846 int i;
847
848 listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
849 clear_bit(XPT_CONN, &xprt->xpt_flags);
850 /* Get the next entry off the accept list */
851 spin_lock_bh(&listen_rdma->sc_lock);
852 if (!list_empty(&listen_rdma->sc_accept_q)) {
853 newxprt = list_entry(listen_rdma->sc_accept_q.next,
854 struct svcxprt_rdma, sc_accept_q);
855 list_del_init(&newxprt->sc_accept_q);
856 }
857 if (!list_empty(&listen_rdma->sc_accept_q))
858 set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
859 spin_unlock_bh(&listen_rdma->sc_lock);
860 if (!newxprt)
861 return NULL;
862
863 dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
864 newxprt, newxprt->sc_cm_id);
865
866 ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
867 if (ret) {
868 dprintk("svcrdma: could not query device attributes on "
869 "device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
870 goto errout;
871 }
872
873 /* Qualify the transport resource defaults with the
874 * capabilities of this particular device */
875 newxprt->sc_max_sge = min((size_t)devattr.max_sge,
876 (size_t)RPCSVC_MAXPAGES);
877 newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
878 (size_t)svcrdma_max_requests);
879 newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;
880
881 /*
882 * Limit ORD based on client limit, local device limit, and
883 * configured svcrdma limit.
884 */
885 newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
886 newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);
887
888 newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
889 if (IS_ERR(newxprt->sc_pd)) {
890 dprintk("svcrdma: error creating PD for connect request\n");
891 goto errout;
892 }
893 newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
894 sq_comp_handler,
895 cq_event_handler,
896 newxprt,
897 newxprt->sc_sq_depth,
898 0);
899 if (IS_ERR(newxprt->sc_sq_cq)) {
900 dprintk("svcrdma: error creating SQ CQ for connect request\n");
901 goto errout;
902 }
903 newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
904 rq_comp_handler,
905 cq_event_handler,
906 newxprt,
907 newxprt->sc_max_requests,
908 0);
909 if (IS_ERR(newxprt->sc_rq_cq)) {
910 dprintk("svcrdma: error creating RQ CQ for connect request\n");
911 goto errout;
912 }
913
914 memset(&qp_attr, 0, sizeof qp_attr);
915 qp_attr.event_handler = qp_event_handler;
916 qp_attr.qp_context = &newxprt->sc_xprt;
917 qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
918 qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
919 qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
920 qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
921 qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
922 qp_attr.qp_type = IB_QPT_RC;
923 qp_attr.send_cq = newxprt->sc_sq_cq;
924 qp_attr.recv_cq = newxprt->sc_rq_cq;
925 dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
926 " cm_id->device=%p, sc_pd->device=%p\n"
927 " cap.max_send_wr = %d\n"
928 " cap.max_recv_wr = %d\n"
929 " cap.max_send_sge = %d\n"
930 " cap.max_recv_sge = %d\n",
931 newxprt->sc_cm_id, newxprt->sc_pd,
932 newxprt->sc_cm_id->device, newxprt->sc_pd->device,
933 qp_attr.cap.max_send_wr,
934 qp_attr.cap.max_recv_wr,
935 qp_attr.cap.max_send_sge,
936 qp_attr.cap.max_recv_sge);
937
938 ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
939 if (ret) {
940 /*
941 * XXX: This is a hack. We need a xx_request_qp interface
942 * that will adjust the qp_attr's with a best-effort
943 * number
944 */
945 qp_attr.cap.max_send_sge -= 2;
946 qp_attr.cap.max_recv_sge -= 2;
947 ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd,
948 &qp_attr);
949 if (ret) {
950 dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
951 goto errout;
952 }
953 newxprt->sc_max_sge = qp_attr.cap.max_send_sge;
954 newxprt->sc_max_sge = qp_attr.cap.max_recv_sge;
955 newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;
956 newxprt->sc_max_requests = qp_attr.cap.max_recv_wr;
957 }
958 newxprt->sc_qp = newxprt->sc_cm_id->qp;
959
960 /*
961 * Use the most secure set of MR resources based on the
962 * transport type and available memory management features in
963 * the device. Here's the table implemented below:
964 *
965 * Fast Global DMA Remote WR
966 * Reg LKEY MR Access
967 * Sup'd Sup'd Needed Needed
968 *
969 * IWARP N N Y Y
970 * N Y Y Y
971 * Y N Y N
972 * Y Y N -
973 *
974 * IB N N Y N
975 * N Y N -
976 * Y N Y N
977 * Y Y N -
978 *
979 * NB: iWARP requires remote write access for the data sink
980 * of an RDMA_READ. IB does not.
981 */
982 if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
983 newxprt->sc_frmr_pg_list_len =
984 devattr.max_fast_reg_page_list_len;
985 newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
986 }
987
988 /*
989 * Determine if a DMA MR is required and if so, what privs are required
990 */
991 switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) {
992 case RDMA_TRANSPORT_IWARP:
993 newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
994 if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
995 need_dma_mr = 1;
996 dma_mr_acc =
997 (IB_ACCESS_LOCAL_WRITE |
998 IB_ACCESS_REMOTE_WRITE);
999 } else if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
1000 need_dma_mr = 1;
1001 dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
1002 } else
1003 need_dma_mr = 0;
1004 break;
1005 case RDMA_TRANSPORT_IB:
1006 if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
1007 need_dma_mr = 1;
1008 dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
1009 } else
1010 need_dma_mr = 0;
1011 break;
1012 default:
1013 goto errout;
1014 }
1015
1016 /* Create the DMA MR if needed, otherwise, use the DMA LKEY */
1017 if (need_dma_mr) {
1018 /* Register all of physical memory */
1019 newxprt->sc_phys_mr =
1020 ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
1021 if (IS_ERR(newxprt->sc_phys_mr)) {
1022 dprintk("svcrdma: Failed to create DMA MR ret=%d\n",
1023 ret);
1024 goto errout;
1025 }
1026 newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
1027 } else
1028 newxprt->sc_dma_lkey =
1029 newxprt->sc_cm_id->device->local_dma_lkey;
1030
1031 /* Post receive buffers */
1032 for (i = 0; i < newxprt->sc_max_requests; i++) {
1033 ret = svc_rdma_post_recv(newxprt);
1034 if (ret) {
1035 dprintk("svcrdma: failure posting receive buffers\n");
1036 goto errout;
1037 }
1038 }
1039
1040 /* Swap out the handler */
1041 newxprt->sc_cm_id->event_handler = rdma_cma_handler;
1042
1043 /*
1044 * Arm the CQs for the SQ and RQ before accepting so we can't
1045 * miss the first message
1046 */
1047 ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
1048 ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);
1049
1050 /* Accept Connection */
1051 set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
1052 memset(&conn_param, 0, sizeof conn_param);
1053 conn_param.responder_resources = 0;
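	/* The server initiates RDMA Reads to pull data from the client;
	 * bound them by the ORD negotiated above. */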
1054 conn_param.initiator_depth = newxprt->sc_ord;
1055 ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
1056 if (ret) {
1057 dprintk("svcrdma: failed to accept new connection, ret=%d\n",
1058 ret);
1059 goto errout;
1060 }
1061
1062 dprintk("svcrdma: new connection %p accepted with the following "
1063 "attributes:\n"
21454aaa 1064 " local_ip : %pI4\n"
377f9b2f 1065 " local_port : %d\n"
21454aaa 1066 " remote_ip : %pI4\n"
1067 " remote_port : %d\n"
1068 " max_sge : %d\n"
1069 " sq_depth : %d\n"
1070 " max_requests : %d\n"
1071 " ord : %d\n",
1072 newxprt,
1073 &((struct sockaddr_in *)&newxprt->sc_cm_id->
1074 route.addr.src_addr)->sin_addr.s_addr,
1075 ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
1076 route.addr.src_addr)->sin_port),
1077 &((struct sockaddr_in *)&newxprt->sc_cm_id->
1078 route.addr.dst_addr)->sin_addr.s_addr,
1079 ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
1080 route.addr.dst_addr)->sin_port),
1081 newxprt->sc_max_sge,
1082 newxprt->sc_sq_depth,
1083 newxprt->sc_max_requests,
1084 newxprt->sc_ord);
1085
1086 return &newxprt->sc_xprt;
1087
1088 errout:
1089 dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
1090 /* Take a reference in case the DTO handler runs */
1091 svc_xprt_get(&newxprt->sc_xprt);
1711386c 1092 if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
c48cbb40 1093 ib_destroy_qp(newxprt->sc_qp);
377f9b2f 1094 rdma_destroy_id(newxprt->sc_cm_id);
1095 /* This call to put will destroy the transport */
1096 svc_xprt_put(&newxprt->sc_xprt);
1097 return NULL;
1098}
1099
1100static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
1101{
1102}
1103
c48cbb40 1104/*
1711386c 1105 * When connected, an svc_xprt has at least two references:
1106 *
1107 * - A reference held by the cm_id between the ESTABLISHED and
1108 * DISCONNECTED events. If the remote peer disconnected first, this
1109 * reference could be gone.
1110 *
1111 * - A reference held by the svc_recv code that called this function
1112 * as part of close processing.
1113 *
1711386c 1114 * At a minimum, one reference should still be held.
c48cbb40 1115 */
1116static void svc_rdma_detach(struct svc_xprt *xprt)
1117{
1118 struct svcxprt_rdma *rdma =
1119 container_of(xprt, struct svcxprt_rdma, sc_xprt);
377f9b2f 1120 dprintk("svc: svc_rdma_detach(%p)\n", xprt);
1121
1122 /* Disconnect and flush posted WQE */
377f9b2f 1123 rdma_disconnect(rdma->sc_cm_id);
1124}
1125
8da91ea8 1126static void __svc_rdma_free(struct work_struct *work)
377f9b2f 1127{
1128 struct svcxprt_rdma *rdma =
1129 container_of(work, struct svcxprt_rdma, sc_work);
377f9b2f 1130 dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);
8da91ea8 1131
c48cbb40 1132 /* We should only be called from kref_put */
1133 BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);
1134
1135 /*
1136 * Destroy queued, but not processed read completions. Note
1137 * that this cleanup has to be done before destroying the
1138 * cm_id because the device ptr is needed to unmap the dma in
1139 * svc_rdma_put_context.
1140 */
1141 while (!list_empty(&rdma->sc_read_complete_q)) {
1142 struct svc_rdma_op_ctxt *ctxt;
1143 ctxt = list_entry(rdma->sc_read_complete_q.next,
1144 struct svc_rdma_op_ctxt,
1145 dto_q);
1146 list_del_init(&ctxt->dto_q);
1147 svc_rdma_put_context(ctxt, 1);
1148 }
1149
1150 /* Destroy queued, but not processed recv completions */
1151 while (!list_empty(&rdma->sc_rq_dto_q)) {
1152 struct svc_rdma_op_ctxt *ctxt;
1153 ctxt = list_entry(rdma->sc_rq_dto_q.next,
1154 struct svc_rdma_op_ctxt,
1155 dto_q);
1156 list_del_init(&ctxt->dto_q);
1157 svc_rdma_put_context(ctxt, 1);
1158 }
1159
1160 /* Warn if we leaked a resource or under-referenced */
1161 WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
87295b6c 1162 WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);
356d0a15 1163
1164 /* De-allocate fastreg mr */
1165 rdma_dealloc_frmr_q(rdma);
1166
1167 /* Destroy the QP if present (not a listener) */
1168 if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
1169 ib_destroy_qp(rdma->sc_qp);
1170
1171 if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
1172 ib_destroy_cq(rdma->sc_sq_cq);
377f9b2f 1173
1174 if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
1175 ib_destroy_cq(rdma->sc_rq_cq);
377f9b2f 1176
1177 if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
1178 ib_dereg_mr(rdma->sc_phys_mr);
377f9b2f 1179
1180 if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
1181 ib_dealloc_pd(rdma->sc_pd);
377f9b2f 1182
1183 /* Destroy the CM ID */
1184 rdma_destroy_id(rdma->sc_cm_id);
1185
c48cbb40 1186 kfree(rdma);
1187}
1188
1189static void svc_rdma_free(struct svc_xprt *xprt)
1190{
1191 struct svcxprt_rdma *rdma =
1192 container_of(xprt, struct svcxprt_rdma, sc_xprt);
1193 INIT_WORK(&rdma->sc_work, __svc_rdma_free);
a25e758c 1194 queue_work(svc_rdma_wq, &rdma->sc_work);
1195}
1196
1197static int svc_rdma_has_wspace(struct svc_xprt *xprt)
1198{
1199 struct svcxprt_rdma *rdma =
1200 container_of(xprt, struct svcxprt_rdma, sc_xprt);
1201
1202 /*
1203 * If there are fewer SQ WR available than required to send a
1204 * simple response, return false.
1205 */
1206 if ((rdma->sc_sq_depth - atomic_read(&rdma->sc_sq_count) < 3))
1207 return 0;
1208
1209 /*
1210 * ...or there are already waiters on the SQ,
1211 * return false.
1212 */
1213 if (waitqueue_active(&rdma->sc_send_wait))
1214 return 0;
1215
1216 /* Otherwise return true. */
1217 return 1;
1218}
1219
1220/*
1221 * Attempt to register the kvec representing the RPC memory with the
1222 * device.
1223 *
 1224 * Returns:
 1225 * 0 : The FAST_REG WR needed to register the kvec was
 1226 * successfully posted.
 1227 * <0 : An error was encountered attempting to post the
 1228 * register WR.
1229 */
1230int svc_rdma_fastreg(struct svcxprt_rdma *xprt,
1231 struct svc_rdma_fastreg_mr *frmr)
1232{
1233 struct ib_send_wr fastreg_wr;
1234 u8 key;
1235
1236 /* Bump the key */
1237 key = (u8)(frmr->mr->lkey & 0x000000FF);
1238 ib_update_fast_reg_key(frmr->mr, ++key);
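	/* Only the low-order 8 bits of the R_Key are under consumer control;
	 * bumping them distinguishes this registration from earlier uses of
	 * the same MR. */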
1239
1240 /* Prepare FASTREG WR */
1241 memset(&fastreg_wr, 0, sizeof fastreg_wr);
1242 fastreg_wr.opcode = IB_WR_FAST_REG_MR;
1243 fastreg_wr.send_flags = IB_SEND_SIGNALED;
1244 fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva;
1245 fastreg_wr.wr.fast_reg.page_list = frmr->page_list;
1246 fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len;
1247 fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
1248 fastreg_wr.wr.fast_reg.length = frmr->map_len;
1249 fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags;
1250 fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey;
1251 return svc_rdma_send(xprt, &fastreg_wr);
1252}
1253
1254int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
1255{
1256 struct ib_send_wr *bad_wr, *n_wr;
1257 int wr_count;
1258 int i;
1259 int ret;
1260
1261 if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
9d6347ac 1262 return -ENOTCONN;
1263
1264 BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
1265 wr_count = 1;
1266 for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
1267 wr_count++;
1268
1269 /* If the SQ is full, wait until an SQ entry is available */
1270 while (1) {
1271 spin_lock_bh(&xprt->sc_lock);
5b180a9a 1272 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
1273 spin_unlock_bh(&xprt->sc_lock);
1274 atomic_inc(&rdma_stat_sq_starve);
1275
1276 /* See if we can opportunistically reap SQ WR to make room */
1277 sq_cq_reap(xprt);
1278
1279 /* Wait until SQ WR available if SQ still full */
1280 wait_event(xprt->sc_send_wait,
1281 atomic_read(&xprt->sc_sq_count) <
1282 xprt->sc_sq_depth);
830bb59b 1283 if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
b432e6b3 1284 return -ENOTCONN;
1285 continue;
1286 }
1287 /* Take a transport ref for each WR posted */
1288 for (i = 0; i < wr_count; i++)
1289 svc_xprt_get(&xprt->sc_xprt);
1290
1291 /* Bump used SQ WR count and post */
1292 atomic_add(wr_count, &xprt->sc_sq_count);
377f9b2f 1293 ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
1294 if (ret) {
1295 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
1296 atomic_sub(wr_count, &xprt->sc_sq_count);
1297 for (i = 0; i < wr_count; i ++)
1298 svc_xprt_put(&xprt->sc_xprt);
1299 dprintk("svcrdma: failed to post SQ WR rc=%d, "
1300 "sc_sq_count=%d, sc_sq_depth=%d\n",
1301 ret, atomic_read(&xprt->sc_sq_count),
1302 xprt->sc_sq_depth);
0905c0f0 1303 }
377f9b2f 1304 spin_unlock_bh(&xprt->sc_lock);
1305 if (ret)
1306 wake_up(&xprt->sc_send_wait);
1307 break;
1308 }
1309 return ret;
1310}
1311
1312void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
1313 enum rpcrdma_errcode err)
1314{
1315 struct ib_send_wr err_wr;
1316 struct page *p;
1317 struct svc_rdma_op_ctxt *ctxt;
1318 u32 *va;
1319 int length;
1320 int ret;
1321
1322 p = svc_rdma_get_page();
1323 va = page_address(p);
1324
1325 /* XDR encode error */
1326 length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
1327
1328 ctxt = svc_rdma_get_context(xprt);
1329 ctxt->direction = DMA_FROM_DEVICE;
1330 ctxt->count = 1;
1331 ctxt->pages[0] = p;
1332
377f9b2f 1333 /* Prepare SGE for local address */
1334 ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
1335 p, 0, length, DMA_FROM_DEVICE);
1336 if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
1337 put_page(p);
1338 return;
1339 }
1340 atomic_inc(&xprt->sc_dma_used);
1341 ctxt->sge[0].lkey = xprt->sc_dma_lkey;
1342 ctxt->sge[0].length = length;
1343
1344 /* Prepare SEND WR */
1345 memset(&err_wr, 0, sizeof err_wr);
1346 ctxt->wr_op = IB_WR_SEND;
1347 err_wr.wr_id = (unsigned long)ctxt;
4a84386f 1348 err_wr.sg_list = ctxt->sge;
1349 err_wr.num_sge = 1;
1350 err_wr.opcode = IB_WR_SEND;
1351 err_wr.send_flags = IB_SEND_SIGNALED;
1352
1353 /* Post It */
1354 ret = svc_rdma_send(xprt, &err_wr);
1355 if (ret) {
1356 dprintk("svcrdma: Error %d posting send for protocol error\n",
1357 ret);
4a84386f 1358 svc_rdma_unmap_dma(ctxt);
1359 svc_rdma_put_context(ctxt, 1);
1360 }
377f9b2f 1361}