/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This Software is licensed under one of the following licenses:
 *
 * 1) under the terms of the "Common Public License 1.0" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/cpl.php.
 *
 * 2) under the terms of the "The BSD License" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/bsd-license.php.
 *
 * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
 *    copy of which is available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/gpl-license.php.
 *
 * Licensee has the right to choose one of the above licenses.
 *
 * Redistributions of source code must retain the above copyright
 * notice and one of the license notices.
 *
 * Redistributions in binary form must reproduce both the above copyright
 * notice, one of the license notices in the documentation
 * and/or other materials provided with the distribution.
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>

#include <net/tcp.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15

static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
static int next_port;

struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
};

enum cma_state {
	CMA_IDLE,
	CMA_ADDR_BOUND,
	CMA_ADDR_QUERY,
	CMA_ADDR_RESOLVED,
	CMA_ROUTE_QUERY,
	CMA_ROUTE_RESOLVED,
	CMA_CONNECT,
	CMA_DISCONNECT,
	CMA_LISTEN,
	CMA_DESTROYING,
	CMA_DEVICE_REMOVAL,
};

struct rdma_bind_list {
	struct idr		*ps;
	struct hlist_head	owners;
	unsigned short		port;
};

/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list;
	struct list_head	listen_list;
	struct cma_device	*cma_dev;
	struct list_head	mc_list;

	enum cma_state		state;
	spinlock_t		lock;
	struct completion	comp;
	atomic_t		refcount;
	wait_queue_head_t	wait_remove;
	atomic_t		dev_remove;

	int			backlog;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qkey;
	u32			qp_num;
	u8			srq;
};
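
/*
 * Illustrative sketch (not part of the driver): every event handler below
 * brackets its callback with the cma_disable_remove()/cma_enable_remove()
 * pair so that device removal is held off while an event is being
 * delivered to the user:
 *
 *	if (cma_disable_remove(id_priv, CMA_CONNECT))
 *		return 0;
 *	... build and deliver the rdma_cm_event ...
 *	cma_enable_remove(id_priv);
 */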

struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;
	void			*context;
	struct sockaddr		addr;
	u8			pad[sizeof(struct sockaddr_in6) -
				    sizeof(struct sockaddr)];
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum cma_state		old_state;
	enum cma_state		new_state;
	struct rdma_cm_event	event;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__u32 pad[3];
		__u32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__u16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hh {
	u8 bsdh[16];
	u8 sdp_version; /* Major version: 7:4 */
	u8 ip_version;	/* IP version: 7:4 */
	u8 sdp_specific1[10];
	__u16 port;
	__u16 sdp_specific2;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hah {
	u8 bsdh[16];
	u8 sdp_version;
};

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2

static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum cma_state comp, enum cma_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum cma_state cma_exch(struct rdma_id_private *id_priv,
			       enum cma_state exch)
{
	unsigned long flags;
	enum cma_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}

static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
	return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}

static inline int cma_is_ud_ps(enum rdma_port_space ps)
{
	return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
}
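
/*
 * Worked example of the version nibble packing used by the helpers above:
 * cma_set_ip_ver(hdr, 4) stores 4 in bits 7:4, so a fresh header reads
 * 0x40 and cma_get_ip_ver() recovers 4 by shifting right four bits; bits
 * 3:0 are left untouched.  The SDP helpers pack sdp_version and ip_version
 * the same way.
 */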

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
}

static int cma_set_qkey(struct ib_device *device, u8 port_num,
			enum rdma_port_space ps,
			struct rdma_dev_addr *dev_addr, u32 *qkey)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	switch (ps) {
	case RDMA_PS_UDP:
		*qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec);
		*qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}

static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid;
	int ret = -ENODEV;

	switch (rdma_node_get_transport(dev_addr->dev_type)) {
	case RDMA_TRANSPORT_IB:
		ib_addr_get_sgid(dev_addr, &gid);
		break;
	case RDMA_TRANSPORT_IWARP:
		iw_addr_get_sgid(dev_addr, &gid);
		break;
	default:
		return -ENODEV;
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = ib_find_cached_gid(cma_dev->device, &gid,
					 &id_priv->id.port_num, NULL);
		if (!ret) {
			ret = cma_set_qkey(cma_dev->device,
					   id_priv->id.port_num,
					   id_priv->id.ps, dev_addr,
					   &id_priv->qkey);
			if (!ret)
				cma_attach_to_dev(id_priv, cma_dev);
			break;
		}
	}
	return ret;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static int cma_disable_remove(struct rdma_id_private *id_priv,
			      enum cma_state state)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if (id_priv->state == state) {
		atomic_inc(&id_priv->dev_remove);
		ret = 0;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static void cma_enable_remove(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->dev_remove))
		wake_up(&id_priv->wait_remove);
}

struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = CMA_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	spin_lock_init(&id_priv->lock);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	init_waitqueue_head(&id_priv->wait_remove);
	atomic_set(&id_priv->dev_remove, 0);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
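
/*
 * Illustrative sketch (not part of the driver): a minimal active-side use
 * of the exported API, assuming a hypothetical my_handler/my_ctx pair.
 * Each step after rdma_create_id() is driven from the RDMA_CM_EVENT_*
 * callbacks rather than inline:
 *
 *	struct rdma_cm_id *id;
 *
 *	id = rdma_create_id(my_handler, my_ctx, RDMA_PS_TCP);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	ret = rdma_resolve_addr(id, NULL, dst_addr, 2000);
 *	...on RDMA_CM_EVENT_ADDR_RESOLVED: rdma_resolve_route(id, 2000);
 *	...on RDMA_CM_EVENT_ROUTE_RESOLVED: rdma_create_qp() + rdma_connect();
 */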

static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (cma_is_ud_ps(id_priv->id.ps))
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	ib_destroy_qp(id->qp);
}
EXPORT_SYMBOL(rdma_destroy_qp);

static int cma_modify_qp_rtr(struct rdma_cm_id *id)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	if (!id->qp)
		return 0;

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
}

static int cma_modify_qp_rts(struct rdma_cm_id *id)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	if (!id->qp)
		return 0;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
}

static int cma_modify_qp_err(struct rdma_cm_id *id)
{
	struct ib_qp_attr qp_attr;

	if (!id->qp)
		return 0;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(id->qp, &qp_attr, IB_QP_STATE);
}
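
/*
 * The three helpers above walk a connection's QP through the standard IB
 * state ladder: cma_modify_qp_rtr() takes it RESET -> INIT -> RTR,
 * cma_modify_qp_rts() completes RTR -> RTS, and cma_modify_qp_err() drops
 * the QP to ERROR on failure.  A minimal sketch of the happy path as used
 * by cma_rep_recv() below:
 *
 *	ret = cma_modify_qp_rtr(id);
 *	if (!ret)
 *		ret = cma_modify_qp_rts(id);
 *	if (ret)
 *		cma_modify_qp_err(id);
 */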

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  ib_addr_get_pkey(dev_addr),
				  &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (cma_is_ud_ps(id_priv->id.ps)) {
		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);

static inline int cma_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return ZERONET(((struct sockaddr_in *) addr)->sin_addr.s_addr);
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
			ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	return LOOPBACK(((struct sockaddr_in *) addr)->sin_addr.s_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static inline __be16 cma_port(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ((struct sockaddr_in *) addr)->sin_port;
	else
		return ((struct sockaddr_in6 *) addr)->sin6_port;
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}

static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
			    u8 *ip_ver, __u16 *port,
			    union cma_ip_addr **src, union cma_ip_addr **dst)
{
	switch (ps) {
	case RDMA_PS_SDP:
		if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
		    SDP_MAJ_VERSION)
			return -EINVAL;

		*ip_ver	= sdp_get_ip_ver(hdr);
		*port	= ((struct sdp_hh *) hdr)->port;
		*src	= &((struct sdp_hh *) hdr)->src_addr;
		*dst	= &((struct sdp_hh *) hdr)->dst_addr;
		break;
	default:
		if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
			return -EINVAL;

		*ip_ver	= cma_get_ip_ver(hdr);
		*port	= ((struct cma_hdr *) hdr)->port;
		*src	= &((struct cma_hdr *) hdr)->src_addr;
		*dst	= &((struct cma_hdr *) hdr)->dst_addr;
		break;
	}

	if (*ip_ver != 4 && *ip_ver != 6)
		return -EINVAL;
	return 0;
}

static void cma_save_net_info(struct rdma_addr *addr,
			      struct rdma_addr *listen_addr,
			      u8 ip_ver, __u16 port,
			      union cma_ip_addr *src, union cma_ip_addr *dst)
{
	struct sockaddr_in *listen4, *ip4;
	struct sockaddr_in6 *listen6, *ip6;

	switch (ip_ver) {
	case 4:
		listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
		ip4 = (struct sockaddr_in *) &addr->src_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = dst->ip4.addr;
		ip4->sin_port = listen4->sin_port;

		ip4 = (struct sockaddr_in *) &addr->dst_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = src->ip4.addr;
		ip4->sin_port = port;
		break;
	case 6:
		listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
		ip6 = (struct sockaddr_in6 *) &addr->src_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = dst->ip6;
		ip6->sin6_port = listen6->sin6_port;

		ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = src->ip6;
		ip6->sin6_port = port;
		break;
	default:
		break;
	}
}

static inline int cma_user_data_offset(enum rdma_port_space ps)
{
	switch (ps) {
	case RDMA_PS_SDP:
		return 0;
	default:
		return sizeof(struct cma_hdr);
	}
}
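
/*
 * Private data layout on the wire for non-SDP port spaces, as implied by
 * cma_user_data_offset() and cma_format_hdr() below: the CMA header is
 * prepended to the user's private data, so a receiver skips
 * sizeof(struct cma_hdr) bytes to reach the payload.  SDP carries its own
 * header, hence offset 0 there.
 */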

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}

static inline int cma_internal_listen(struct rdma_id_private *id_priv)
{
	return (id_priv->state == CMA_LISTEN) && id_priv->cma_dev &&
	       cma_any_addr(&id_priv->id.route.addr.src_addr);
}

static void cma_destroy_listen(struct rdma_id_private *id_priv)
{
	cma_exch(id_priv, CMA_DESTROYING);

	if (id_priv->cma_dev) {
		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_detach_from_dev(id_priv);
	}
	list_del(&id_priv->listen_list);

	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	kfree(id_priv);
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		cma_destroy_listen(dev_id_priv);
	}
	mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum cma_state state)
{
	switch (state) {
	case CMA_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case CMA_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case CMA_LISTEN:
		if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
		    !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		ib_sa_free_multicast(mc->multicast.ib);
		kfree(mc);
	}
}

void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum cma_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, CMA_DESTROYING);
	cma_cancel_operation(id_priv, state);

	mutex_lock(&lock);
	if (id_priv->cma_dev) {
		mutex_unlock(&lock);
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_leave_mc_groups(id_priv);
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
	}
	mutex_unlock(&lock);

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);

static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(&id_priv->id);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(&id_priv->id);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(&id_priv->id);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
	if (id_priv->id.ps == RDMA_PS_SDP &&
	    sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
	    SDP_MAJ_VERSION)
		return -EINVAL;
	return 0;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}

static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	if (cma_disable_remove(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		event.status = cma_verify_rep(id_priv, ib_event->private_data);
		if (event.status)
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
		else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(&id_priv->id);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		cma_enable_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	cma_enable_remove(id_priv);
	return ret;
}

static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	union cma_ip_addr *src, *dst;
	__u16 port;
	u8 ip_ver;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto destroy_id;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	ib_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
	ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
	ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
	rt->addr.dev_addr.dev_type = RDMA_NODE_IB_CA;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;

destroy_id:
	rdma_destroy_id(id);
err:
	return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	union cma_ip_addr *src, *dst;
	__u16 port;
	u8 ip_ver;
	int ret;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		return NULL;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	ret = rdma_translate_ip(&id->route.addr.src_addr,
				&id->route.addr.dev_addr);
	if (ret)
		goto err;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}

static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}

static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;

	listen_id = cm_id->context;
	if (cma_disable_remove(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id->id.ps);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (cma_is_ud_ps(listen_id->id.ps)) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_inc(&conn_id->dev_remove);
	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret)
		goto release_conn_id;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (!ret)
		goto out;

	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;

release_conn_id:
	cma_exch(conn_id, CMA_DESTROYING);
	cma_enable_remove(conn_id);
	rdma_destroy_id(&conn_id->id);

out:
	cma_enable_remove(listen_id);
	return ret;
}

static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
	return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
}
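
/*
 * Worked example for cma_get_service_id(), assuming RDMA_PS_TCP is 0x0106
 * as defined in rdma_cm.h: with a destination port of 0x1388 (5000), the
 * service ID is cpu_to_be64((0x0106ULL << 16) + 0x1388), i.e.
 * cpu_to_be64(0x01061388), so both the port space and the port number are
 * recoverable from the service ID on the passive side.
 */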

static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	struct sdp_hh *sdp_data, *sdp_mask;
	__u32 ip4_addr;
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	sdp_data = (void *) compare->data;
	sdp_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 4);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip4.addr = ip4_addr;
			sdp_mask->dst_addr.ip4.addr = ~0;
		} else {
			cma_set_ip_ver(cma_data, 4);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = ~0;
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 6);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip6 = ip6_addr;
			memset(&sdp_mask->dst_addr.ip6, 0xFF,
			       sizeof sdp_mask->dst_addr.ip6);
		} else {
			cma_set_ip_ver(cma_data, 6);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
	default:
		break;
	}
}

static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	struct sockaddr_in *sin;
	int ret = 0;

	if (cma_disable_remove(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
		*sin = iw_event->local_addr;
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
		*sin = iw_event->remote_addr;
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		cma_enable_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	cma_enable_remove(id_priv);
	return ret;
}

static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct sockaddr_in *sin;
	struct net_device *dev = NULL;
	struct rdma_cm_event event;
	int ret;

	listen_id = cm_id->context;
	if (cma_disable_remove(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	atomic_inc(&conn_id->dev_remove);
	conn_id->state = CMA_CONNECT;

	dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
	if (!dev) {
		ret = -EADDRNOTAVAIL;
		cma_enable_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}
	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
	if (ret) {
		cma_enable_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret) {
		cma_enable_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
	*sin = iw_event->local_addr;
	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
	*sin = iw_event->remote_addr;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, CMA_DESTROYING);
		cma_enable_remove(conn_id);
		rdma_destroy_id(&conn_id->id);
	}

out:
	if (dev)
		dev_put(dev);
	cma_enable_remove(listen_id);
	return ret;
}

static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	__be64 svc_id;
	int ret;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib))
		return PTR_ERR(id_priv->cm_id.ib);

	addr = &id_priv->id.route.addr.src_addr;
	svc_id = cma_get_service_id(id_priv->id.ps, addr);
	if (cma_any_addr(addr))
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct sockaddr_in *sin;

	id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
					    iw_conn_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.iw))
		return PTR_ERR(id_priv->cm_id.iw);

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	id_priv->cm_id.iw->local_addr = *sin;

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = CMA_ADDR_BOUND;
	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
	       ip_addr_size(&id_priv->id.route.addr.src_addr));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		cma_destroy_listen(dev_id_priv);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
{
	struct sockaddr_in addr_in;

	memset(&addr_in, 0, sizeof addr_in);
	addr_in.sin_family = af;
	return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
}

int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_any(id, AF_INET);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
		return -EINVAL;

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);
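
/*
 * Illustrative sketch (not part of the driver): a minimal passive-side
 * sequence, assuming a hypothetical my_handler that accepts incoming
 * connections from its RDMA_CM_EVENT_CONNECT_REQUEST callback:
 *
 *	id = rdma_create_id(my_handler, my_ctx, RDMA_PS_TCP);
 *	ret = rdma_bind_addr(id, (struct sockaddr *) &my_sin);
 *	if (!ret)
 *		ret = rdma_listen(id, 10);
 *	...my_handler receives a new rdma_cm_id per request and may call
 *	rdma_accept() or rdma_reject() on it.
 */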

static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = CMA_ROUTE_QUERY;
		work->new_state = CMA_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}

static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_dev_addr *addr = &id_priv->id.route.addr.dev_addr;
	struct ib_sa_path_rec path_rec;

	memset(&path_rec, 0, sizeof path_rec);
	ib_addr_get_sgid(addr, &path_rec.sgid);
	ib_addr_get_dgid(addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
				id_priv->id.port_num, &path_rec,
				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
				IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
				IB_SA_PATH_REC_REVERSIBLE,
				timeout_ms, GFP_KERNEL,
				cma_query_handler, work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}

static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	atomic_inc(&id_priv->dev_remove);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}
out:
	cma_enable_remove(id_priv);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}

int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);

static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	mutex_lock(&lock);
	if (list_empty(&dev_list)) {
		ret = -ENODEV;
		goto out;
	}
	list_for_each_entry(cma_dev, &dev_list, list)
		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE)
				goto port_found;

	p = 1;
	cma_dev = list_entry(dev_list.next, struct cma_device, list);

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
out:
	mutex_unlock(&lock);
	return ret;
}

static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	atomic_inc(&id_priv->dev_remove);

	/*
	 * Grab mutex to block rdma_destroy_id() from removing the device while
	 * we're trying to acquire it.
	 */
	mutex_lock(&lock);
	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
		mutex_unlock(&lock);
		goto out;
	}

	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv);
	mutex_unlock(&lock);

	if (status) {
		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else {
		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
		       ip_addr_size(src_addr));
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	}

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		cma_enable_remove(id_priv);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	cma_enable_remove(id_priv);
	cma_deref_id(id_priv);
}

static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	struct sockaddr_in *src_in, *dst_in;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
		src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
		dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
		src_in->sin_family = dst_in->sin_family;
		src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
	}

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ADDR_QUERY;
	work->new_state = CMA_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}

static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (src_addr && src_addr->sa_family)
		return rdma_bind_addr(id, src_addr);
	else
		return cma_bind_any(id, dst_addr->sa_family);
}

int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
	if (cma_any_addr(dst_addr))
		ret = cma_resolve_loopback(id_priv);
	else
		ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
				      dst_addr, &id->route.addr.dev_addr,
				      timeout_ms, addr_handler, id_priv);
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
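
/*
 * rdma_resolve_addr() completes asynchronously: addr_handler() above runs
 * from the address-resolution callback and reports either
 * RDMA_CM_EVENT_ADDR_RESOLVED or RDMA_CM_EVENT_ADDR_ERROR to the user, who
 * then typically calls rdma_resolve_route() from the event handler.
 */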

static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr_in *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	sin->sin_port = htons(bind_list->port);
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}

static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
			  unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int port, ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	do {
		ret = idr_get_new_above(ps, bind_list, snum, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	if (port != snum) {
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}

static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	int port, ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

retry:
	do {
		ret = idr_get_new_above(ps, bind_list, next_port, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	if (port > sysctl_local_port_range[1]) {
		if (next_port != sysctl_local_port_range[0]) {
			idr_remove(ps, port);
			next_port = sysctl_local_port_range[0];
			goto retry;
		}
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	if (port == sysctl_local_port_range[1])
		next_port = sysctl_local_port_range[0];
	else
		next_port = port + 1;

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}

static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr_in *sin, *cur_sin;
	struct rdma_bind_list *bind_list;
	struct hlist_node *node;
	unsigned short snum;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	snum = ntohs(sin->sin_port);
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = idr_find(ps, snum);
	if (!bind_list)
		return cma_alloc_port(ps, id_priv, snum);

	/*
	 * We don't support binding to any address if anyone is bound to
	 * a specific address on the same port.
	 */
	if (cma_any_addr(&id_priv->id.route.addr.src_addr))
		return -EADDRNOTAVAIL;

	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
		if (cma_any_addr(&cur_id->id.route.addr.src_addr))
			return -EADDRNOTAVAIL;

		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
		if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
			return -EADDRINUSE;
	}

	cma_bind_port(bind_list, id_priv);
	return 0;
}
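
/*
 * Example of the sharing rule enforced by cma_use_port(): two ids may bind
 * the same port as long as each names a distinct specific address, e.g.
 * 192.168.0.1:7000 and 192.168.0.2:7000, but a wildcard bind
 * (0.0.0.0:7000) fails with -EADDRNOTAVAIL once any owner exists on that
 * port, and binding a specific address fails likewise if a wildcard owner
 * already holds the port.
 */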

static int cma_get_port(struct rdma_id_private *id_priv)
{
	struct idr *ps;
	int ret;

	switch (id_priv->id.ps) {
	case RDMA_PS_SDP:
		ps = &sdp_ps;
		break;
	case RDMA_PS_TCP:
		ps = &tcp_ps;
		break;
	case RDMA_PS_UDP:
		ps = &udp_ps;
		break;
	case RDMA_PS_IPOIB:
		ps = &ipoib_ps;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	mutex_lock(&lock);
	if (cma_any_port(&id_priv->id.route.addr.src_addr))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}

int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
		return -EINVAL;

	if (!cma_any_addr(addr)) {
		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		mutex_lock(&lock);
		ret = cma_acquire_dev(id_priv);
		mutex_unlock(&lock);
		if (ret)
			goto err1;
	}

	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (!cma_any_addr(addr)) {
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
		mutex_unlock(&lock);
	}
err1:
	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);

static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
			  struct rdma_route *route)
{
	struct sockaddr_in *src4, *dst4;
	struct cma_hdr *cma_hdr;
	struct sdp_hh *sdp_hdr;

	src4 = (struct sockaddr_in *) &route->addr.src_addr;
	dst4 = (struct sockaddr_in *) &route->addr.dst_addr;

	switch (ps) {
	case RDMA_PS_SDP:
		sdp_hdr = hdr;
		if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
			return -EINVAL;
		sdp_set_ip_ver(sdp_hdr, 4);
		sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		sdp_hdr->port = src4->sin_port;
		break;
	default:
		cma_hdr = hdr;
		cma_hdr->cma_version = CMA_VERSION;
		cma_set_ip_ver(cma_hdr, 4);
		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		cma_hdr->port = src4->sin_port;
		break;
	}
	return 0;
}
*cm_id
,
2048 struct ib_cm_event
*ib_event
)
2050 struct rdma_id_private
*id_priv
= cm_id
->context
;
2051 struct rdma_cm_event event
;
2052 struct ib_cm_sidr_rep_event_param
*rep
= &ib_event
->param
.sidr_rep_rcvd
;
2055 if (cma_disable_remove(id_priv
, CMA_CONNECT
))
2058 memset(&event
, 0, sizeof event
);
2059 switch (ib_event
->event
) {
2060 case IB_CM_SIDR_REQ_ERROR
:
2061 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
2062 event
.status
= -ETIMEDOUT
;
2064 case IB_CM_SIDR_REP_RECEIVED
:
2065 event
.param
.ud
.private_data
= ib_event
->private_data
;
2066 event
.param
.ud
.private_data_len
= IB_CM_SIDR_REP_PRIVATE_DATA_SIZE
;
2067 if (rep
->status
!= IB_SIDR_SUCCESS
) {
2068 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
2069 event
.status
= ib_event
->param
.sidr_rep_rcvd
.status
;
2072 if (id_priv
->qkey
!= rep
->qkey
) {
2073 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
2074 event
.status
= -EINVAL
;
2077 ib_init_ah_from_path(id_priv
->id
.device
, id_priv
->id
.port_num
,
2078 id_priv
->id
.route
.path_rec
,
2079 &event
.param
.ud
.ah_attr
);
2080 event
.param
.ud
.qp_num
= rep
->qpn
;
2081 event
.param
.ud
.qkey
= rep
->qkey
;
2082 event
.event
= RDMA_CM_EVENT_ESTABLISHED
;
2086 printk(KERN_ERR
"RDMA CMA: unexpected IB CM event: %d",
2091 ret
= id_priv
->id
.event_handler(&id_priv
->id
, &event
);
2093 /* Destroy the CM ID by returning a non-zero value. */
2094 id_priv
->cm_id
.ib
= NULL
;
2095 cma_exch(id_priv
, CMA_DESTROYING
);
2096 cma_enable_remove(id_priv
);
2097 rdma_destroy_id(&id_priv
->id
);
2101 cma_enable_remove(id_priv
);

static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct rdma_route *route;
	int ret;

	req.private_data_len = sizeof(struct cma_hdr) +
			       conn_param->private_data_len;
	req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!req.private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy((void *) req.private_data + sizeof(struct cma_hdr),
		       conn_param->private_data, conn_param->private_data_len);

	route = &id_priv->id.route;
	ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
	if (ret)
		goto out;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
					    cma_sidr_rep_handler, id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	req.path = route->path_rec;
	req.service_id = cma_get_service_id(id_priv->id.ps,
					    &route->addr.dst_addr);
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(req.private_data);
	return ret;
}

static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv->id.ps);
	req.private_data_len = offset + conn_param->private_data_len;
	private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	route = &id_priv->id.route;
	ret = cma_format_hdr(private_data, id_priv->id.ps, route);
	if (ret)
		goto out;
	req.private_data = private_data;

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = cma_get_service_id(id_priv->id.ps,
					    &route->addr.dst_addr);
	req.qp_num = id_priv->qp_num;
	req.qp_type = IB_QPT_RC;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = conn_param->retry_count;
	req.rnr_retry_count = conn_param->rnr_retry_count;
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id_priv->cm_id.ib)) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}
*id_priv
,
2213 struct rdma_conn_param
*conn_param
)
2215 struct iw_cm_id
*cm_id
;
2216 struct sockaddr_in
* sin
;
2218 struct iw_cm_conn_param iw_param
;
2220 cm_id
= iw_create_cm_id(id_priv
->id
.device
, cma_iw_handler
, id_priv
);
2221 if (IS_ERR(cm_id
)) {
2222 ret
= PTR_ERR(cm_id
);
2226 id_priv
->cm_id
.iw
= cm_id
;
2228 sin
= (struct sockaddr_in
*) &id_priv
->id
.route
.addr
.src_addr
;
2229 cm_id
->local_addr
= *sin
;
2231 sin
= (struct sockaddr_in
*) &id_priv
->id
.route
.addr
.dst_addr
;
2232 cm_id
->remote_addr
= *sin
;
2234 ret
= cma_modify_qp_rtr(&id_priv
->id
);
2238 iw_param
.ord
= conn_param
->initiator_depth
;
2239 iw_param
.ird
= conn_param
->responder_resources
;
2240 iw_param
.private_data
= conn_param
->private_data
;
2241 iw_param
.private_data_len
= conn_param
->private_data_len
;
2243 iw_param
.qpn
= id_priv
->qp_num
;
2245 iw_param
.qpn
= conn_param
->qp_num
;
2246 ret
= iw_cm_connect(cm_id
, &iw_param
);
2248 if (ret
&& !IS_ERR(cm_id
)) {
2249 iw_destroy_cm_id(cm_id
);
2250 id_priv
->cm_id
.iw
= NULL
;

int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
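
/*
 * rdma_connect() dispatches on both transport and port space: UD port
 * spaces (RDMA_PS_UDP/RDMA_PS_IPOIB) use the SIDR resolution exchange in
 * cma_resolve_ib_udp(), connected IB services send a full CM REQ via
 * cma_connect_ib(), and iWARP connections go through iw_cm_connect().
 */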

static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	if (id_priv->id.qp) {
		ret = cma_modify_qp_rtr(&id_priv->id);
		if (ret)
			goto out;

		qp_attr.qp_state = IB_QPS_RTS;
		ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr,
					 &qp_attr_mask);
		if (ret)
			goto out;

		qp_attr.max_rd_atomic = conn_param->initiator_depth;
		ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
		if (ret)
			goto out;
	}

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.target_ack_delay = CMA_CM_RESPONSE_TIMEOUT;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = conn_param->rnr_retry_count;
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}

static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(&id_priv->id);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}

static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}

int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
						conn_param->private_data,
						conn_param->private_data_len);
		else if (conn_param)
			ret = cma_accept_ib(id_priv, conn_param);
		else
			ret = cma_rep_recv(id_priv);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);
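
/*
 * rdma_accept() is normally called from the RDMA_CM_EVENT_CONNECT_REQUEST
 * callback on the newly created child id (see cma_req_handler() and
 * iw_conn_req_handler() above).  Calling it with a NULL conn_param on a
 * connected IB id completes an earlier CONNECT_RESPONSE via
 * cma_rep_recv().
 */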
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);
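/*
 * Usage sketch (illustrative only): consumers that manage their own QP
 * typically forward communication-established events from the QP's
 * async event handler, e.g.:
 *
 *	if (ibevent->event == IB_EVENT_COMM_EST)
 *		rdma_notify(id, IB_EVENT_COMM_EST);
 *
 * This lets the IB CM stop retransmitting the REP when data arrives on
 * the connection before the RTU has been received.
 */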
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);
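/*
 * Usage sketch (illustrative only): a listener that cannot service a
 * request rejects it from the CONNECT_REQUEST handler, optionally
 * attaching private data for the initiator.  "reason" below is a
 * hypothetical consumer-defined value:
 *
 *	ret = rdma_reject(id, &reason, sizeof reason);
 *
 * For UD port spaces the rejection travels in a SIDR REP with status
 * IB_SIDR_REJECT rather than a CM REJ.
 */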
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT) &&
	    !cma_comp(id_priv, CMA_DISCONNECT))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
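/*
 * Usage sketch (illustrative only): either side may tear down an
 * established connection.  The peer sees RDMA_CM_EVENT_DISCONNECTED and
 * usually responds in kind before destroying the id:
 *
 *	case RDMA_CM_EVENT_DISCONNECTED:
 *		rdma_disconnect(id);
 *		break;
 */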
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	if (cma_disable_remove(id_priv, CMA_ADDR_BOUND) &&
	    cma_disable_remove(id_priv, CMA_ADDR_RESOLVED))
		return 0;

	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 multicast->rec.mlid);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, CMA_DESTROYING);
		cma_enable_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

	cma_enable_remove(id_priv);
	return 0;
}
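/*
 * Note: the 0xFFFFFF queue pair number reported to the consumer above is
 * the IBA multicast QPN; UD sends addressed to the group carry it in
 * place of a real remote QPN.  A failed join is reported as
 * RDMA_CM_EVENT_MULTICAST_ERROR with the SA status in event.status, and
 * a non-zero return from the consumer's handler destroys the id.
 */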
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFF10A01B) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		mc_map[8] = ib_addr_get_pkey(dev_addr) >> 8;
		mc_map[9] = (unsigned char) ib_addr_get_pkey(dev_addr);
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
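/*
 * Worked example (values hypothetical): for the IPv4 group 239.1.2.3
 * with P_Key 0xffff, ip_ib_mc_map() yields the IPoIB-style mapping with
 * GID prefix ff12:401b and the group's lower 28 bits in the final bytes
 * (0f01:0203).  The code above then writes the P_Key into bytes 8-9 of
 * the map (GID bytes 4-5) and, for RDMA_PS_UDP, replaces the 0x1b IPv4
 * signature byte with the 0x01 RDMA CM signature, so the joined MGID
 * becomes ff12:4001:ffff:0000:0000:0000:0f01:0203.
 */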
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, &mc->addr, &rec.mgid);
	if (id_priv->id.ps == RDMA_PS_UDP)
		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
	ib_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	if (IS_ERR(mc->multicast.ib))
		return PTR_ERR(mc->multicast.ib);

	return 0;
}
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, ip_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_join_ib_multicast(id_priv, mc);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
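/*
 * Usage sketch (illustrative only): a UD consumer joins after binding or
 * resolving its source address, then transmits using the AH attributes,
 * QPN, and Q_Key delivered with RDMA_CM_EVENT_MULTICAST_JOIN.
 * "mcast_addr" and "my_context" are hypothetical names:
 *
 *	ret = rdma_resolve_addr(id, NULL, (struct sockaddr *) &mcast_addr,
 *				2000);
 *	...
 *	ret = rdma_join_multicast(id, (struct sockaddr *) &mcast_addr,
 *				  my_context);
 */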
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						mc->multicast.ib->rec.mlid);
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
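/*
 * Usage sketch (illustrative only): leaving uses the same address that
 * was passed to rdma_join_multicast():
 *
 *	rdma_leave_multicast(id, (struct sockaddr *) &mcast_addr);
 *
 * The cma_multicast entry is removed from mc_list and freed on the first
 * matching call, so a repeated leave for the same address is a no-op.
 */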
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum cma_state state;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
	if (state == CMA_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	wait_event(id_priv->wait_remove, !atomic_read(&id_priv->dev_remove));

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
		return 0;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	return id_priv->id.event_handler(&id_priv->id, &event);
}
static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		if (cma_internal_listen(id_priv)) {
			cma_destroy_listen(id_priv);
			continue;
		}

		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}
static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}
static int cma_init(void)
{
	int ret;

	get_random_bytes(&next_port, sizeof next_port);
	next_port = (next_port % (sysctl_local_port_range[1] -
				  sysctl_local_port_range[0])) +
		    sysctl_local_port_range[0];
	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;
	return 0;

err:
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}
static void cma_cleanup(void)
{
	ib_unregister_client(&cma_client);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);