/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This Software is licensed under one of the following licenses:
 *
 * 1) under the terms of the "Common Public License 1.0" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/cpl.php.
 *
 * 2) under the terms of the "The BSD License" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/bsd-license.php.
 *
 * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
 *    copy of which is available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/gpl-license.php.
 *
 * Licensee has the right to choose one of the above licenses.
 *
 * Redistributions of source code must retain the above copyright
 * notice and one of the license notices.
 *
 * Redistributions in binary form must reproduce both the above copyright
 * notice, one of the license notices in the documentation
 * and/or other materials provided with the distribution.
 */
#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>

#include <net/tcp.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
        .name   = "cma",
        .add    = cma_add_one,
        .remove = cma_remove_one
};
static struct ib_sa_client sa_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
struct cma_device {
        struct list_head        list;
        struct ib_device        *device;
        __be64                  node_guid;
        struct completion       comp;
        atomic_t                refcount;
        struct list_head        id_list;
};

enum cma_state {
        CMA_IDLE,
        CMA_ADDR_QUERY,
        CMA_ADDR_RESOLVED,
        CMA_ROUTE_QUERY,
        CMA_ROUTE_RESOLVED,
        CMA_CONNECT,
        CMA_DISCONNECT,
        CMA_ADDR_BOUND,
        CMA_LISTEN,
        CMA_DEVICE_REMOVAL,
        CMA_DESTROYING
};

struct rdma_bind_list {
        struct idr              *ps;
        struct hlist_head       owners;
        unsigned short          port;
};
/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
        struct rdma_cm_id       id;

        struct rdma_bind_list   *bind_list;
        struct hlist_node       node;
        struct list_head        list;
        struct list_head        listen_list;
        struct cma_device       *cma_dev;

        enum cma_state          state;
        spinlock_t              lock;
        struct completion       comp;
        atomic_t                refcount;
        wait_queue_head_t       wait_remove;
        atomic_t                dev_remove;

        int                     backlog;
        int                     timeout_ms;
        struct ib_sa_query      *query;
        int                     query_id;
        union {
                struct ib_cm_id *ib;
                struct iw_cm_id *iw;
        } cm_id;

        u32                     seq_num;
        u32                     qp_num;
        enum ib_qp_type         qp_type;
        u8                      srq;
};

struct cma_work {
        struct work_struct      work;
        struct rdma_id_private  *id;
        enum cma_state          old_state;
        enum cma_state          new_state;
        struct rdma_cm_event    event;
};
union cma_ip_addr {
        struct in6_addr ip6;
        struct {
                __u32 pad[3];
                __u32 addr;
        } ip4;
};

struct cma_hdr {
        u8 cma_version;
        u8 ip_version;  /* IP version: 7:4 */
        __u16 port;
        union cma_ip_addr src_addr;
        union cma_ip_addr dst_addr;
};

struct sdp_hh {
        u8 bsdh[16];
        u8 sdp_version; /* Major version: 7:4 */
        u8 ip_version;  /* IP version: 7:4 */
        u8 sdp_specific1[10];
        __u16 port;
        __u16 sdp_specific2;
        union cma_ip_addr src_addr;
        union cma_ip_addr dst_addr;
};

struct sdp_hah {
        u8 bsdh[16];
        u8 sdp_version;
};

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2
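/*
 * Both headers travel at the front of the CM REQ private data: the SDP
 * hello header is defined by the SDP spec, while cma_hdr is the CMA's
 * own format, versioned by CMA_VERSION.  The passive side parses them
 * in cma_get_net_info() below.
 */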
static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&id_priv->lock, flags);
        ret = (id_priv->state == comp);
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}
static int cma_comp_exch(struct rdma_id_private *id_priv,
                         enum cma_state comp, enum cma_state exch)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&id_priv->lock, flags);
        if ((ret = (id_priv->state == comp)))
                id_priv->state = exch;
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}
static enum cma_state cma_exch(struct rdma_id_private *id_priv,
                               enum cma_state exch)
{
        unsigned long flags;
        enum cma_state old;

        spin_lock_irqsave(&id_priv->lock, flags);
        old = id_priv->state;
        id_priv->state = exch;
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return old;
}
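/*
 * cma_comp() tests the current state, cma_comp_exch() is a locked
 * compare-and-exchange on it, and cma_exch() swaps unconditionally and
 * returns the old state.  A typical transition while connecting is
 * cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT), as done in
 * rdma_connect() below.
 */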
static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
        return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
        hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
        return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
        return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
        hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}
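/*
 * Worked example of the nibble packing above: starting from
 * hdr->ip_version == 0x06, cma_set_ip_ver(hdr, 4) stores
 * (4 << 4) | 0x6 == 0x46, and cma_get_ip_ver() then returns
 * 0x46 >> 4 == 4.
 */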
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
                              struct cma_device *cma_dev)
{
        atomic_inc(&cma_dev->refcount);
        id_priv->cma_dev = cma_dev;
        id_priv->id.device = cma_dev->device;
        list_add_tail(&id_priv->list, &cma_dev->id_list);
}
static inline void cma_deref_dev(struct cma_device *cma_dev)
{
        if (atomic_dec_and_test(&cma_dev->refcount))
                complete(&cma_dev->comp);
}

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
        list_del(&id_priv->list);
        cma_deref_dev(id_priv->cma_dev);
        id_priv->cma_dev = NULL;
}
static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
        enum rdma_node_type dev_type = id_priv->id.route.addr.dev_addr.dev_type;
        struct cma_device *cma_dev;
        union ib_gid gid;
        int ret = -ENODEV;

        switch (rdma_node_get_transport(dev_type)) {
        case RDMA_TRANSPORT_IB:
                ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
                break;
        case RDMA_TRANSPORT_IWARP:
                iw_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
                break;
        default:
                return -ENODEV;
        }

        list_for_each_entry(cma_dev, &dev_list, list) {
                ret = ib_find_cached_gid(cma_dev->device, &gid,
                                         &id_priv->id.port_num, NULL);
                if (!ret) {
                        cma_attach_to_dev(id_priv, cma_dev);
                        break;
                }
        }
        return ret;
}
static void cma_deref_id(struct rdma_id_private *id_priv)
{
        if (atomic_dec_and_test(&id_priv->refcount))
                complete(&id_priv->comp);
}

static void cma_release_remove(struct rdma_id_private *id_priv)
{
        if (atomic_dec_and_test(&id_priv->dev_remove))
                wake_up(&id_priv->wait_remove);
}
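/*
 * Two counters guard each id: refcount keeps the rdma_id_private alive
 * until rdma_destroy_id() waits on comp, while dev_remove holds off
 * device-removal notification for as long as any callback is running
 * (see the comment above struct rdma_id_private).
 */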
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
                                  void *context, enum rdma_port_space ps)
{
        struct rdma_id_private *id_priv;

        id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
        if (!id_priv)
                return ERR_PTR(-ENOMEM);

        id_priv->state = CMA_IDLE;
        id_priv->id.context = context;
        id_priv->id.event_handler = event_handler;
        id_priv->id.ps = ps;
        spin_lock_init(&id_priv->lock);
        init_completion(&id_priv->comp);
        atomic_set(&id_priv->refcount, 1);
        init_waitqueue_head(&id_priv->wait_remove);
        atomic_set(&id_priv->dev_remove, 0);
        INIT_LIST_HEAD(&id_priv->listen_list);
        get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

        return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
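/*
 * Illustrative consumer flow (a sketch, not part of this file): an
 * active side typically calls rdma_create_id(), rdma_resolve_addr(),
 * rdma_resolve_route(), rdma_create_qp(), then rdma_connect(), reacting
 * to the RDMA_CM_EVENT_* callbacks between steps; a passive side calls
 * rdma_bind_addr(), rdma_listen(), then rdma_accept() from its
 * connect-request callback.
 */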
static int cma_init_ib_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;
        struct rdma_dev_addr *dev_addr;
        int ret;

        dev_addr = &id_priv->id.route.addr.dev_addr;
        ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
                                  ib_addr_get_pkey(dev_addr),
                                  &qp_attr.pkey_index);
        if (ret)
                return ret;

        qp_attr.qp_state = IB_QPS_INIT;
        qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
        qp_attr.port_num = id_priv->id.port_num;
        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
                                          IB_QP_PKEY_INDEX | IB_QP_PORT);
}
static int cma_init_iw_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;

        qp_attr.qp_state = IB_QPS_INIT;
        qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE |
                                  IB_ACCESS_REMOTE_READ |
                                  IB_ACCESS_REMOTE_WRITE;

        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS);
}
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
                   struct ib_qp_init_attr *qp_init_attr)
{
        struct rdma_id_private *id_priv;
        struct ib_qp *qp;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (id->device != pd->device)
                return -EINVAL;

        qp = ib_create_qp(pd, qp_init_attr);
        if (IS_ERR(qp))
                return PTR_ERR(qp);

        switch (rdma_node_get_transport(id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
                ret = cma_init_ib_qp(id_priv, qp);
                break;
        case RDMA_TRANSPORT_IWARP:
                ret = cma_init_iw_qp(id_priv, qp);
                break;
        default:
                ret = -ENOSYS;
                break;
        }

        if (ret)
                goto err;

        id->qp = qp;
        id_priv->qp_num = qp->qp_num;
        id_priv->qp_type = qp->qp_type;
        id_priv->srq = (qp->srq != NULL);
        return 0;
err:
        ib_destroy_qp(qp);
        return ret;
}
EXPORT_SYMBOL(rdma_create_qp);
void rdma_destroy_qp(struct rdma_cm_id *id)
{
        ib_destroy_qp(id->qp);
}
EXPORT_SYMBOL(rdma_destroy_qp);
static int cma_modify_qp_rtr(struct rdma_cm_id *id)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        if (!id->qp)
                return 0;

        /* Need to update QP attributes from default values. */
        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        ret = ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
        if (ret)
                return ret;

        qp_attr.qp_state = IB_QPS_RTR;
        ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
}
static int cma_modify_qp_rts(struct rdma_cm_id *id)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        if (!id->qp)
                return 0;

        qp_attr.qp_state = IB_QPS_RTS;
        ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
}
static int cma_modify_qp_err(struct rdma_cm_id *id)
{
        struct ib_qp_attr qp_attr;

        if (!id->qp)
                return 0;

        qp_attr.qp_state = IB_QPS_ERR;
        return ib_modify_qp(id->qp, &qp_attr, IB_QP_STATE);
}
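/*
 * The three helpers above walk a connection's QP through the usual
 * IB_QPS_INIT -> IB_QPS_RTR -> IB_QPS_RTS ladder (or into IB_QPS_ERR on
 * failure), asking rdma_init_qp_attr() below for the transport-specific
 * attribute mask at each step.
 */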
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
                      int *qp_attr_mask)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
        case RDMA_TRANSPORT_IB:
                ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
                                         qp_attr_mask);
                if (qp_attr->qp_state == IB_QPS_RTR)
                        qp_attr->rq_psn = id_priv->seq_num;
                break;
        case RDMA_TRANSPORT_IWARP:
                ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
                                         qp_attr_mask);
                break;
        default:
                ret = -ENOSYS;
                break;
        }

        return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
static inline int cma_zero_addr(struct sockaddr *addr)
{
        struct in6_addr *ip6;

        if (addr->sa_family == AF_INET)
                return ZERONET(((struct sockaddr_in *) addr)->sin_addr.s_addr);
        else {
                ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
                return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
                        ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
        }
}
static inline int cma_loopback_addr(struct sockaddr *addr)
{
        return LOOPBACK(((struct sockaddr_in *) addr)->sin_addr.s_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
        return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static inline int cma_any_port(struct sockaddr *addr)
{
        return !((struct sockaddr_in *) addr)->sin_port;
}
static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
                            u8 *ip_ver, __u16 *port,
                            union cma_ip_addr **src, union cma_ip_addr **dst)
{
        switch (ps) {
        case RDMA_PS_SDP:
                if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
                    SDP_MAJ_VERSION)
                        return -EINVAL;

                *ip_ver = sdp_get_ip_ver(hdr);
                *port   = ((struct sdp_hh *) hdr)->port;
                *src    = &((struct sdp_hh *) hdr)->src_addr;
                *dst    = &((struct sdp_hh *) hdr)->dst_addr;
                break;
        default:
                if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
                        return -EINVAL;

                *ip_ver = cma_get_ip_ver(hdr);
                *port   = ((struct cma_hdr *) hdr)->port;
                *src    = &((struct cma_hdr *) hdr)->src_addr;
                *dst    = &((struct cma_hdr *) hdr)->dst_addr;
                break;
        }

        if (*ip_ver != 4 && *ip_ver != 6)
                return -EINVAL;
        return 0;
}
static void cma_save_net_info(struct rdma_addr *addr,
                              struct rdma_addr *listen_addr,
                              u8 ip_ver, __u16 port,
                              union cma_ip_addr *src, union cma_ip_addr *dst)
{
        struct sockaddr_in *listen4, *ip4;
        struct sockaddr_in6 *listen6, *ip6;

        switch (ip_ver) {
        case 4:
                listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
                ip4 = (struct sockaddr_in *) &addr->src_addr;
                ip4->sin_family = listen4->sin_family;
                ip4->sin_addr.s_addr = dst->ip4.addr;
                ip4->sin_port = listen4->sin_port;

                ip4 = (struct sockaddr_in *) &addr->dst_addr;
                ip4->sin_family = listen4->sin_family;
                ip4->sin_addr.s_addr = src->ip4.addr;
                ip4->sin_port = port;
                break;
        case 6:
                listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
                ip6 = (struct sockaddr_in6 *) &addr->src_addr;
                ip6->sin6_family = listen6->sin6_family;
                ip6->sin6_addr = dst->ip6;
                ip6->sin6_port = listen6->sin6_port;

                ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
                ip6->sin6_family = listen6->sin6_family;
                ip6->sin6_addr = src->ip6;
                ip6->sin6_port = port;
                break;
        default:
                break;
        }
}
static inline int cma_user_data_offset(enum rdma_port_space ps)
{
        switch (ps) {
        case RDMA_PS_SDP:
                return 0;
        default:
                return sizeof(struct cma_hdr);
        }
}
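/*
 * Offset example: for RDMA_PS_SDP the CM private data is the SDP hello
 * header itself, so user data starts at offset 0; for every other port
 * space a struct cma_hdr (36 bytes with the layout above) is prepended
 * and the user's private data follows it.
 */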
static int cma_notify_user(struct rdma_id_private *id_priv,
                           enum rdma_cm_event_type type, int status,
                           void *data, u8 data_len)
{
        struct rdma_cm_event event;

        event.event = type;
        event.status = status;
        event.private_data = data;
        event.private_data_len = data_len;

        return id_priv->id.event_handler(&id_priv->id, &event);
}
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
        switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
        case RDMA_TRANSPORT_IB:
                if (id_priv->query)
                        ib_sa_cancel_query(id_priv->query_id, id_priv->query);
                break;
        default:
                break;
        }
}
static inline int cma_internal_listen(struct rdma_id_private *id_priv)
{
        return (id_priv->state == CMA_LISTEN) && id_priv->cma_dev &&
               cma_any_addr(&id_priv->id.route.addr.src_addr);
}
static void cma_destroy_listen(struct rdma_id_private *id_priv)
{
        cma_exch(id_priv, CMA_DESTROYING);

        if (id_priv->cma_dev) {
                switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
                case RDMA_TRANSPORT_IB:
                        if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
                                ib_destroy_cm_id(id_priv->cm_id.ib);
                        break;
                case RDMA_TRANSPORT_IWARP:
                        if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
                                iw_destroy_cm_id(id_priv->cm_id.iw);
                        break;
                default:
                        break;
                }
                cma_detach_from_dev(id_priv);
        }
        list_del(&id_priv->listen_list);

        cma_deref_id(id_priv);
        wait_for_completion(&id_priv->comp);

        kfree(id_priv);
}
static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
        struct rdma_id_private *dev_id_priv;

        mutex_lock(&lock);
        list_del(&id_priv->list);

        while (!list_empty(&id_priv->listen_list)) {
                dev_id_priv = list_entry(id_priv->listen_list.next,
                                         struct rdma_id_private, listen_list);
                cma_destroy_listen(dev_id_priv);
        }
        mutex_unlock(&lock);
}
static void cma_cancel_operation(struct rdma_id_private *id_priv,
                                 enum cma_state state)
{
        switch (state) {
        case CMA_ADDR_QUERY:
                rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
                break;
        case CMA_ROUTE_QUERY:
                cma_cancel_route(id_priv);
                break;
        case CMA_LISTEN:
                if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
                    !id_priv->cma_dev)
                        cma_cancel_listens(id_priv);
                break;
        default:
                break;
        }
}
static void cma_release_port(struct rdma_id_private *id_priv)
{
        struct rdma_bind_list *bind_list = id_priv->bind_list;

        if (!bind_list)
                return;

        mutex_lock(&lock);
        hlist_del(&id_priv->node);
        if (hlist_empty(&bind_list->owners)) {
                idr_remove(bind_list->ps, bind_list->port);
                kfree(bind_list);
        }
        mutex_unlock(&lock);
}
void rdma_destroy_id(struct rdma_cm_id *id)
{
        struct rdma_id_private *id_priv;
        enum cma_state state;

        id_priv = container_of(id, struct rdma_id_private, id);
        state = cma_exch(id_priv, CMA_DESTROYING);
        cma_cancel_operation(id_priv, state);

        mutex_lock(&lock);
        if (id_priv->cma_dev) {
                mutex_unlock(&lock);
                switch (rdma_node_get_transport(id->device->node_type)) {
                case RDMA_TRANSPORT_IB:
                        if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
                                ib_destroy_cm_id(id_priv->cm_id.ib);
                        break;
                case RDMA_TRANSPORT_IWARP:
                        if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
                                iw_destroy_cm_id(id_priv->cm_id.iw);
                        break;
                default:
                        break;
                }
                mutex_lock(&lock);
                cma_detach_from_dev(id_priv);
        }
        mutex_unlock(&lock);

        cma_release_port(id_priv);
        cma_deref_id(id_priv);
        wait_for_completion(&id_priv->comp);

        kfree(id_priv->id.route.path_rec);
        kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
        int ret;

        ret = cma_modify_qp_rtr(&id_priv->id);
        if (ret)
                goto reject;

        ret = cma_modify_qp_rts(&id_priv->id);
        if (ret)
                goto reject;

        ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
        if (ret)
                goto reject;

        return 0;
reject:
        cma_modify_qp_err(&id_priv->id);
        ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
                       NULL, 0, NULL, 0);
        return ret;
}
static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
        if (id_priv->id.ps == RDMA_PS_SDP &&
            sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
            SDP_MAJ_VERSION)
                return -EINVAL;
        return 0;
}
static int cma_rtu_recv(struct rdma_id_private *id_priv)
{
        int ret;

        ret = cma_modify_qp_rts(&id_priv->id);
        if (ret)
                goto reject;

        return 0;
reject:
        cma_modify_qp_err(&id_priv->id);
        ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
                       NULL, 0, NULL, 0);
        return ret;
}
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
        struct rdma_id_private *id_priv = cm_id->context;
        enum rdma_cm_event_type event;
        u8 private_data_len = 0;
        int ret = 0, status = 0;

        atomic_inc(&id_priv->dev_remove);
        if (!cma_comp(id_priv, CMA_CONNECT))
                goto out;

        switch (ib_event->event) {
        case IB_CM_REQ_ERROR:
        case IB_CM_REP_ERROR:
                event = RDMA_CM_EVENT_UNREACHABLE;
                status = -ETIMEDOUT;
                break;
        case IB_CM_REP_RECEIVED:
                status = cma_verify_rep(id_priv, ib_event->private_data);
                if (status)
                        event = RDMA_CM_EVENT_CONNECT_ERROR;
                else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
                        status = cma_rep_recv(id_priv);
                        event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
                                         RDMA_CM_EVENT_ESTABLISHED;
                } else
                        event = RDMA_CM_EVENT_CONNECT_RESPONSE;
                private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
                break;
        case IB_CM_RTU_RECEIVED:
                status = cma_rtu_recv(id_priv);
                event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
                                 RDMA_CM_EVENT_ESTABLISHED;
                break;
        case IB_CM_DREQ_ERROR:
                status = -ETIMEDOUT; /* fall through */
        case IB_CM_DREQ_RECEIVED:
        case IB_CM_DREP_RECEIVED:
                if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
                        goto out;
                event = RDMA_CM_EVENT_DISCONNECTED;
                break;
        case IB_CM_TIMEWAIT_EXIT:
        case IB_CM_MRA_RECEIVED:
                /* ignore event */
                goto out;
        case IB_CM_REJ_RECEIVED:
                cma_modify_qp_err(&id_priv->id);
                status = ib_event->param.rej_rcvd.reason;
                event = RDMA_CM_EVENT_REJECTED;
                private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
                break;
        default:
                printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
                       ib_event->event);
                goto out;
        }

        ret = cma_notify_user(id_priv, event, status, ib_event->private_data,
                              private_data_len);
        if (ret) {
                /* Destroy the CM ID by returning a non-zero value. */
                id_priv->cm_id.ib = NULL;
                cma_exch(id_priv, CMA_DESTROYING);
                cma_release_remove(id_priv);
                rdma_destroy_id(&id_priv->id);
                return ret;
        }
out:
        cma_release_remove(id_priv);
        return ret;
}
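/*
 * Returning non-zero from an ib_cm handler tells the IB CM to destroy
 * the cm_id itself, which is why the error path above clears cm_id.ib
 * before calling rdma_destroy_id(): the id must not try to destroy a
 * cm_id the CM is already tearing down.
 */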
static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id,
                                          struct ib_cm_event *ib_event)
{
        struct rdma_id_private *id_priv;
        struct rdma_cm_id *id;
        struct rdma_route *rt;
        union cma_ip_addr *src, *dst;
        __u16 port;
        u8 ip_ver;

        if (cma_get_net_info(ib_event->private_data, listen_id->ps,
                             &ip_ver, &port, &src, &dst))
                goto err;

        id = rdma_create_id(listen_id->event_handler, listen_id->context,
                            listen_id->ps);
        if (IS_ERR(id))
                goto err;

        cma_save_net_info(&id->route.addr, &listen_id->route.addr,
                          ip_ver, port, src, dst);

        rt = &id->route;
        rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
        rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
                               GFP_KERNEL);
        if (!rt->path_rec)
                goto destroy_id;

        rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
        if (rt->num_paths == 2)
                rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

        ib_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
        ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
        ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
        rt->addr.dev_addr.dev_type = RDMA_NODE_IB_CA;

        id_priv = container_of(id, struct rdma_id_private, id);
        id_priv->state = CMA_CONNECT;
        return id_priv;

destroy_id:
        rdma_destroy_id(id);
err:
        return NULL;
}
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
        struct rdma_id_private *listen_id, *conn_id;
        int offset, ret;

        listen_id = cm_id->context;
        atomic_inc(&listen_id->dev_remove);
        if (!cma_comp(listen_id, CMA_LISTEN)) {
                ret = -ECONNABORTED;
                goto out;
        }

        conn_id = cma_new_id(&listen_id->id, ib_event);
        if (!conn_id) {
                ret = -ENOMEM;
                goto out;
        }

        atomic_inc(&conn_id->dev_remove);
        mutex_lock(&lock);
        ret = cma_acquire_dev(conn_id);
        mutex_unlock(&lock);
        if (ret) {
                ret = -ENODEV;
                cma_exch(conn_id, CMA_DESTROYING);
                cma_release_remove(conn_id);
                rdma_destroy_id(&conn_id->id);
                goto out;
        }

        conn_id->cm_id.ib = cm_id;
        cm_id->context = conn_id;
        cm_id->cm_handler = cma_ib_handler;

        offset = cma_user_data_offset(listen_id->id.ps);
        ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
                              ib_event->private_data + offset,
                              IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
        if (ret) {
                /* Destroy the CM ID by returning a non-zero value. */
                conn_id->cm_id.ib = NULL;
                cma_exch(conn_id, CMA_DESTROYING);
                cma_release_remove(conn_id);
                rdma_destroy_id(&conn_id->id);
        }
out:
        cma_release_remove(listen_id);
        return ret;
}
static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
        return cpu_to_be64(((u64)ps << 16) +
                           be16_to_cpu(((struct sockaddr_in *) addr)->sin_port));
}
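/*
 * Service-id example (illustrative, assuming the rdma_cm.h port-space
 * values, where RDMA_PS_TCP == 0x0106): for TCP port 5000 (0x1388),
 * both the listener and the connecting side compute
 * cpu_to_be64((0x0106 << 16) + 0x1388), i.e. service id 0x01061388.
 */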
static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
                                 struct ib_cm_compare_data *compare)
{
        struct cma_hdr *cma_data, *cma_mask;
        struct sdp_hh *sdp_data, *sdp_mask;
        __u32 ip4_addr;
        struct in6_addr ip6_addr;

        memset(compare, 0, sizeof *compare);
        cma_data = (void *) compare->data;
        cma_mask = (void *) compare->mask;
        sdp_data = (void *) compare->data;
        sdp_mask = (void *) compare->mask;

        switch (addr->sa_family) {
        case AF_INET:
                ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
                if (ps == RDMA_PS_SDP) {
                        sdp_set_ip_ver(sdp_data, 4);
                        sdp_set_ip_ver(sdp_mask, 0xF);
                        sdp_data->dst_addr.ip4.addr = ip4_addr;
                        sdp_mask->dst_addr.ip4.addr = ~0;
                } else {
                        cma_set_ip_ver(cma_data, 4);
                        cma_set_ip_ver(cma_mask, 0xF);
                        cma_data->dst_addr.ip4.addr = ip4_addr;
                        cma_mask->dst_addr.ip4.addr = ~0;
                }
                break;
        case AF_INET6:
                ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
                if (ps == RDMA_PS_SDP) {
                        sdp_set_ip_ver(sdp_data, 6);
                        sdp_set_ip_ver(sdp_mask, 0xF);
                        sdp_data->dst_addr.ip6 = ip6_addr;
                        memset(&sdp_mask->dst_addr.ip6, 0xFF,
                               sizeof sdp_mask->dst_addr.ip6);
                } else {
                        cma_set_ip_ver(cma_data, 6);
                        cma_set_ip_ver(cma_mask, 0xF);
                        cma_data->dst_addr.ip6 = ip6_addr;
                        memset(&cma_mask->dst_addr.ip6, 0xFF,
                               sizeof cma_mask->dst_addr.ip6);
                }
                break;
        default:
                break;
        }
}
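/*
 * The compare data/mask pair makes the IB CM deliver only those REQs
 * whose private-data header carries a matching IP version and
 * destination address, letting several rdma_cm listeners share one
 * service id on a device while binding to different local addresses.
 */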
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
        struct rdma_id_private *id_priv = iw_id->context;
        enum rdma_cm_event_type event = 0;
        struct sockaddr_in *sin;
        int ret = 0;

        atomic_inc(&id_priv->dev_remove);

        switch (iw_event->event) {
        case IW_CM_EVENT_CLOSE:
                event = RDMA_CM_EVENT_DISCONNECTED;
                break;
        case IW_CM_EVENT_CONNECT_REPLY:
                sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
                *sin = iw_event->local_addr;
                sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
                *sin = iw_event->remote_addr;
                if (iw_event->status)
                        event = RDMA_CM_EVENT_REJECTED;
                else
                        event = RDMA_CM_EVENT_ESTABLISHED;
                break;
        case IW_CM_EVENT_ESTABLISHED:
                event = RDMA_CM_EVENT_ESTABLISHED;
                break;
        default:
                BUG_ON(1);
        }

        ret = cma_notify_user(id_priv, event, iw_event->status,
                              iw_event->private_data,
                              iw_event->private_data_len);
        if (ret) {
                /* Destroy the CM ID by returning a non-zero value. */
                id_priv->cm_id.iw = NULL;
                cma_exch(id_priv, CMA_DESTROYING);
                cma_release_remove(id_priv);
                rdma_destroy_id(&id_priv->id);
                return ret;
        }

        cma_release_remove(id_priv);
        return ret;
}
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
                               struct iw_cm_event *iw_event)
{
        struct rdma_cm_id *new_cm_id;
        struct rdma_id_private *listen_id, *conn_id;
        struct sockaddr_in *sin;
        struct net_device *dev = NULL;
        int ret;

        listen_id = cm_id->context;
        atomic_inc(&listen_id->dev_remove);
        if (!cma_comp(listen_id, CMA_LISTEN)) {
                ret = -ECONNABORTED;
                goto out;
        }

        /* Create a new RDMA id for the new IW CM ID */
        new_cm_id = rdma_create_id(listen_id->id.event_handler,
                                   listen_id->id.context,
                                   RDMA_PS_TCP);
        if (IS_ERR(new_cm_id)) {
                ret = PTR_ERR(new_cm_id);
                goto out;
        }
        conn_id = container_of(new_cm_id, struct rdma_id_private, id);
        atomic_inc(&conn_id->dev_remove);
        conn_id->state = CMA_CONNECT;

        dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
        if (!dev) {
                ret = -EADDRNOTAVAIL;
                cma_release_remove(conn_id);
                rdma_destroy_id(new_cm_id);
                goto out;
        }
        ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
        if (ret) {
                cma_release_remove(conn_id);
                rdma_destroy_id(new_cm_id);
                goto out;
        }

        mutex_lock(&lock);
        ret = cma_acquire_dev(conn_id);
        mutex_unlock(&lock);
        if (ret) {
                cma_release_remove(conn_id);
                rdma_destroy_id(new_cm_id);
                goto out;
        }

        conn_id->cm_id.iw = cm_id;
        cm_id->context = conn_id;
        cm_id->cm_handler = cma_iw_handler;

        sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
        *sin = iw_event->local_addr;
        sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
        *sin = iw_event->remote_addr;

        ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
                              iw_event->private_data,
                              iw_event->private_data_len);
        if (ret) {
                /* User wants to destroy the CM ID */
                conn_id->cm_id.iw = NULL;
                cma_exch(conn_id, CMA_DESTROYING);
                cma_release_remove(conn_id);
                rdma_destroy_id(&conn_id->id);
        }

out:
        if (dev)
                dev_put(dev);
        cma_release_remove(listen_id);
        return ret;
}
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
        struct ib_cm_compare_data compare_data;
        struct sockaddr *addr;
        __be64 svc_id;
        int ret;

        id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
                                            id_priv);
        if (IS_ERR(id_priv->cm_id.ib))
                return PTR_ERR(id_priv->cm_id.ib);

        addr = &id_priv->id.route.addr.src_addr;
        svc_id = cma_get_service_id(id_priv->id.ps, addr);
        if (cma_any_addr(addr))
                ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
        else {
                cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
                ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
        }

        if (ret) {
                ib_destroy_cm_id(id_priv->cm_id.ib);
                id_priv->cm_id.ib = NULL;
        }

        return ret;
}
static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
        int ret;
        struct sockaddr_in *sin;

        id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
                                            iw_conn_req_handler,
                                            id_priv);
        if (IS_ERR(id_priv->cm_id.iw))
                return PTR_ERR(id_priv->cm_id.iw);

        sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
        id_priv->cm_id.iw->local_addr = *sin;

        ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

        if (ret) {
                iw_destroy_cm_id(id_priv->cm_id.iw);
                id_priv->cm_id.iw = NULL;
        }

        return ret;
}
static int cma_listen_handler(struct rdma_cm_id *id,
                              struct rdma_cm_event *event)
{
        struct rdma_id_private *id_priv = id->context;

        id->context = id_priv->id.context;
        id->event_handler = id_priv->id.event_handler;
        return id_priv->id.event_handler(id, event);
}
static void cma_listen_on_dev(struct rdma_id_private *id_priv,
                              struct cma_device *cma_dev)
{
        struct rdma_id_private *dev_id_priv;
        struct rdma_cm_id *id;
        int ret;

        id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
        if (IS_ERR(id))
                return;

        dev_id_priv = container_of(id, struct rdma_id_private, id);

        dev_id_priv->state = CMA_ADDR_BOUND;
        memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
               ip_addr_size(&id_priv->id.route.addr.src_addr));

        cma_attach_to_dev(dev_id_priv, cma_dev);
        list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);

        ret = rdma_listen(id, id_priv->backlog);
        if (ret)
                goto err;

        return;
err:
        cma_destroy_listen(dev_id_priv);
}
static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
        struct cma_device *cma_dev;

        mutex_lock(&lock);
        list_add_tail(&id_priv->list, &listen_any_list);
        list_for_each_entry(cma_dev, &dev_list, list)
                cma_listen_on_dev(id_priv, cma_dev);
        mutex_unlock(&lock);
}
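/*
 * Wildcard listens: an id bound to the any-address goes on
 * listen_any_list, and a per-device child id (dev_id_priv above) is
 * created for every current and future RDMA device; cma_listen_handler()
 * redirects events from those children back to the user's original id.
 */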
static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
{
        struct sockaddr_in addr_in;

        memset(&addr_in, 0, sizeof addr_in);
        addr_in.sin_family = af;
        return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
}
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (id_priv->state == CMA_IDLE) {
                ret = cma_bind_any(id, AF_INET);
                if (ret)
                        return ret;
        }

        if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
                return -EINVAL;

        id_priv->backlog = backlog;
        if (id->device) {
                switch (rdma_node_get_transport(id->device->node_type)) {
                case RDMA_TRANSPORT_IB:
                        ret = cma_ib_listen(id_priv);
                        if (ret)
                                goto err;
                        break;
                case RDMA_TRANSPORT_IWARP:
                        ret = cma_iw_listen(id_priv, backlog);
                        if (ret)
                                goto err;
                        break;
                default:
                        ret = -ENOSYS;
                        goto err;
                }
        } else
                cma_listen_on_all(id_priv);

        return 0;
err:
        id_priv->backlog = 0;
        cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
        return ret;
}
EXPORT_SYMBOL(rdma_listen);
static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
                              void *context)
{
        struct cma_work *work = context;
        struct rdma_route *route;

        route = &work->id->id.route;

        if (!status) {
                route->num_paths = 1;
                *route->path_rec = *path_rec;
        } else {
                work->old_state = CMA_ROUTE_QUERY;
                work->new_state = CMA_ADDR_RESOLVED;
                work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
                work->event.status = status;
        }

        queue_work(cma_wq, &work->work);
}
static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
                              struct cma_work *work)
{
        struct rdma_dev_addr *addr = &id_priv->id.route.addr.dev_addr;
        struct ib_sa_path_rec path_rec;

        memset(&path_rec, 0, sizeof path_rec);
        ib_addr_get_sgid(addr, &path_rec.sgid);
        ib_addr_get_dgid(addr, &path_rec.dgid);
        path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
        path_rec.numb_path = 1;

        id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
                                               id_priv->id.port_num, &path_rec,
                                               IB_SA_PATH_REC_DGID |
                                               IB_SA_PATH_REC_SGID |
                                               IB_SA_PATH_REC_PKEY |
                                               IB_SA_PATH_REC_NUMB_PATH,
                                               timeout_ms, GFP_KERNEL,
                                               cma_query_handler, work,
                                               &id_priv->query);

        return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
static void cma_work_handler(void *data)
{
        struct cma_work *work = data;
        struct rdma_id_private *id_priv = work->id;
        int destroy = 0;

        atomic_inc(&id_priv->dev_remove);
        if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
                goto out;

        if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
                cma_exch(id_priv, CMA_DESTROYING);
                destroy = 1;
        }
out:
        cma_release_remove(id_priv);
        cma_deref_id(id_priv);
        if (destroy)
                rdma_destroy_id(&id_priv->id);
        kfree(work);
}
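/*
 * Events that originate in callback context (SA query completions,
 * loopback resolution) are bounced through cma_wq so the user's event
 * handler always runs from process context, where it may block or call
 * back into the rdma_cm.
 */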
static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
        struct rdma_route *route = &id_priv->id.route;
        struct cma_work *work;
        int ret;

        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (!work)
                return -ENOMEM;

        work->id = id_priv;
        INIT_WORK(&work->work, cma_work_handler, work);
        work->old_state = CMA_ROUTE_QUERY;
        work->new_state = CMA_ROUTE_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

        route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
        if (!route->path_rec) {
                ret = -ENOMEM;
                goto err1;
        }

        ret = cma_query_ib_route(id_priv, timeout_ms, work);
        if (ret)
                goto err2;

        return 0;
err2:
        kfree(route->path_rec);
        route->path_rec = NULL;
err1:
        kfree(work);
        return ret;
}
int rdma_set_ib_paths(struct rdma_cm_id *id,
                      struct ib_sa_path_rec *path_rec, int num_paths)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
                return -EINVAL;

        id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
        if (!id->route.path_rec) {
                ret = -ENOMEM;
                goto err;
        }

        memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
        return 0;
err:
        cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
        return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);
static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
        struct cma_work *work;

        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (!work)
                return -ENOMEM;

        work->id = id_priv;
        INIT_WORK(&work->work, cma_work_handler, work);
        work->old_state = CMA_ROUTE_QUERY;
        work->new_state = CMA_ROUTE_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
        queue_work(cma_wq, &work->work);
        return 0;
}
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
                return -EINVAL;

        atomic_inc(&id_priv->refcount);
        switch (rdma_node_get_transport(id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
                ret = cma_resolve_ib_route(id_priv, timeout_ms);
                break;
        case RDMA_TRANSPORT_IWARP:
                ret = cma_resolve_iw_route(id_priv, timeout_ms);
                break;
        default:
                ret = -ENOSYS;
                break;
        }
        if (ret)
                goto err;

        return 0;
err:
        cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
        cma_deref_id(id_priv);
        return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
        struct cma_device *cma_dev;
        struct ib_port_attr port_attr;
        union ib_gid gid;
        u16 pkey;
        int ret;
        u8 p;

        mutex_lock(&lock);
        list_for_each_entry(cma_dev, &dev_list, list)
                for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
                        if (!ib_query_port(cma_dev->device, p, &port_attr) &&
                            port_attr.state == IB_PORT_ACTIVE)
                                goto port_found;

        if (!list_empty(&dev_list)) {
                p = 1;
                cma_dev = list_entry(dev_list.next, struct cma_device, list);
        } else {
                ret = -ENODEV;
                goto out;
        }

port_found:
        ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
        if (ret)
                goto out;

        ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
        if (ret)
                goto out;

        ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
        ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
        id_priv->id.port_num = p;
        cma_attach_to_dev(id_priv, cma_dev);
out:
        mutex_unlock(&lock);
        return ret;
}
static void addr_handler(int status, struct sockaddr *src_addr,
                         struct rdma_dev_addr *dev_addr, void *context)
{
        struct rdma_id_private *id_priv = context;
        enum rdma_cm_event_type event;

        atomic_inc(&id_priv->dev_remove);

        /*
         * Grab mutex to block rdma_destroy_id() from removing the device while
         * we're trying to acquire it.
         */
        mutex_lock(&lock);
        if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
                mutex_unlock(&lock);
                goto out;
        }

        if (!status && !id_priv->cma_dev)
                status = cma_acquire_dev(id_priv);
        mutex_unlock(&lock);

        if (status) {
                if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
                        goto out;
                event = RDMA_CM_EVENT_ADDR_ERROR;
        } else {
                memcpy(&id_priv->id.route.addr.src_addr, src_addr,
                       ip_addr_size(src_addr));
                event = RDMA_CM_EVENT_ADDR_RESOLVED;
        }

        if (cma_notify_user(id_priv, event, status, NULL, 0)) {
                cma_exch(id_priv, CMA_DESTROYING);
                cma_release_remove(id_priv);
                cma_deref_id(id_priv);
                rdma_destroy_id(&id_priv->id);
                return;
        }
out:
        cma_release_remove(id_priv);
        cma_deref_id(id_priv);
}
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
        struct cma_work *work;
        struct sockaddr_in *src_in, *dst_in;
        union ib_gid gid;
        int ret;

        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (!work)
                return -ENOMEM;

        if (!id_priv->cma_dev) {
                ret = cma_bind_loopback(id_priv);
                if (ret)
                        goto err;
        }

        ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
        ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

        if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
                src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
                dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
                src_in->sin_family = dst_in->sin_family;
                src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
        }

        work->id = id_priv;
        INIT_WORK(&work->work, cma_work_handler, work);
        work->old_state = CMA_ADDR_QUERY;
        work->new_state = CMA_ADDR_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
        queue_work(cma_wq, &work->work);
        return 0;
err:
        kfree(work);
        return ret;
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
                         struct sockaddr *dst_addr)
{
        if (src_addr && src_addr->sa_family)
                return rdma_bind_addr(id, src_addr);
        else
                return cma_bind_any(id, dst_addr->sa_family);
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
                      struct sockaddr *dst_addr, int timeout_ms)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (id_priv->state == CMA_IDLE) {
                ret = cma_bind_addr(id, src_addr, dst_addr);
                if (ret)
                        return ret;
        }

        if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
                return -EINVAL;

        atomic_inc(&id_priv->refcount);
        memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
        if (cma_any_addr(dst_addr))
                ret = cma_resolve_loopback(id_priv);
        else
                ret = rdma_resolve_ip(&id->route.addr.src_addr, dst_addr,
                                      &id->route.addr.dev_addr,
                                      timeout_ms, addr_handler, id_priv);
        if (ret)
                goto err;

        return 0;
err:
        cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
        cma_deref_id(id_priv);
        return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
static void cma_bind_port(struct rdma_bind_list *bind_list,
                          struct rdma_id_private *id_priv)
{
        struct sockaddr_in *sin;

        sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
        sin->sin_port = htons(bind_list->port);
        id_priv->bind_list = bind_list;
        hlist_add_head(&id_priv->node, &bind_list->owners);
}
static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
                          unsigned short snum)
{
        struct rdma_bind_list *bind_list;
        int port, start, ret;

        bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
        if (!bind_list)
                return -ENOMEM;

        start = snum ? snum : sysctl_local_port_range[0];

        do {
                ret = idr_get_new_above(ps, bind_list, start, &port);
        } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

        if (ret)
                goto err;

        if ((snum && port != snum) ||
            (!snum && port > sysctl_local_port_range[1])) {
                idr_remove(ps, port);
                ret = -EADDRNOTAVAIL;
                goto err;
        }

        bind_list->ps = ps;
        bind_list->port = (unsigned short) port;
        cma_bind_port(bind_list, id_priv);
        return 0;
err:
        kfree(bind_list);
        return ret;
}
static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
        struct rdma_id_private *cur_id;
        struct sockaddr_in *sin, *cur_sin;
        struct rdma_bind_list *bind_list;
        struct hlist_node *node;
        unsigned short snum;

        sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
        snum = ntohs(sin->sin_port);
        if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
                return -EACCES;

        bind_list = idr_find(ps, snum);
        if (!bind_list)
                return cma_alloc_port(ps, id_priv, snum);

        /*
         * We don't support binding to any address if anyone is bound to
         * a specific address on the same port.
         */
        if (cma_any_addr(&id_priv->id.route.addr.src_addr))
                return -EADDRNOTAVAIL;

        hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
                if (cma_any_addr(&cur_id->id.route.addr.src_addr))
                        return -EADDRNOTAVAIL;

                cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
                if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
                        return -EADDRINUSE;
        }

        cma_bind_port(bind_list, id_priv);
        return 0;
}
static int cma_get_port(struct rdma_id_private *id_priv)
{
        struct idr *ps;
        int ret;

        switch (id_priv->id.ps) {
        case RDMA_PS_SDP:
                ps = &sdp_ps;
                break;
        case RDMA_PS_TCP:
                ps = &tcp_ps;
                break;
        default:
                return -EPROTONOSUPPORT;
        }

        mutex_lock(&lock);
        if (cma_any_port(&id_priv->id.route.addr.src_addr))
                ret = cma_alloc_port(ps, id_priv, 0);
        else
                ret = cma_use_port(ps, id_priv);
        mutex_unlock(&lock);

        return ret;
}
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
        struct rdma_id_private *id_priv;
        int ret;

        if (addr->sa_family != AF_INET)
                return -EAFNOSUPPORT;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
                return -EINVAL;

        if (!cma_any_addr(addr)) {
                ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
                if (ret)
                        goto err;

                mutex_lock(&lock);
                ret = cma_acquire_dev(id_priv);
                mutex_unlock(&lock);
                if (ret)
                        goto err;
        }

        memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
        ret = cma_get_port(id_priv);
        if (ret)
                goto err;

        return 0;
err:
        cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
        return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
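/*
 * Binding sketch (illustrative): passing an INADDR_ANY sockaddr_in binds
 * the id to a port only and defers device selection, which is what
 * cma_bind_any() above does on behalf of rdma_listen() and
 * rdma_resolve_addr() for ids still in CMA_IDLE.
 */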
static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
                          struct rdma_route *route)
{
        struct sockaddr_in *src4, *dst4;
        struct cma_hdr *cma_hdr;
        struct sdp_hh *sdp_hdr;

        src4 = (struct sockaddr_in *) &route->addr.src_addr;
        dst4 = (struct sockaddr_in *) &route->addr.dst_addr;

        switch (ps) {
        case RDMA_PS_SDP:
                sdp_hdr = hdr;
                if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
                        return -EINVAL;
                sdp_set_ip_ver(sdp_hdr, 4);
                sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
                sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
                sdp_hdr->port = src4->sin_port;
                break;
        default:
                cma_hdr = hdr;
                cma_hdr->cma_version = CMA_VERSION;
                cma_set_ip_ver(cma_hdr, 4);
                cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
                cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
                cma_hdr->port = src4->sin_port;
                break;
        }
        return 0;
}
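/*
 * cma_format_hdr() is the sender-side twin of cma_get_net_info(): the
 * header written here into the REQ private data is what the passive
 * side parses in cma_new_id() to reconstruct the connection's
 * address/port tuple.
 */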
static int cma_connect_ib(struct rdma_id_private *id_priv,
                          struct rdma_conn_param *conn_param)
{
        struct ib_cm_req_param req;
        struct rdma_route *route;
        void *private_data;
        int offset, ret;

        memset(&req, 0, sizeof req);
        offset = cma_user_data_offset(id_priv->id.ps);
        req.private_data_len = offset + conn_param->private_data_len;
        private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
        if (!private_data)
                return -ENOMEM;

        if (conn_param->private_data && conn_param->private_data_len)
                memcpy(private_data + offset, conn_param->private_data,
                       conn_param->private_data_len);

        id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
                                            id_priv);
        if (IS_ERR(id_priv->cm_id.ib)) {
                ret = PTR_ERR(id_priv->cm_id.ib);
                goto out;
        }

        route = &id_priv->id.route;
        ret = cma_format_hdr(private_data, id_priv->id.ps, route);
        if (ret)
                goto out;
        req.private_data = private_data;

        req.primary_path = &route->path_rec[0];
        if (route->num_paths == 2)
                req.alternate_path = &route->path_rec[1];

        req.service_id = cma_get_service_id(id_priv->id.ps,
                                            &route->addr.dst_addr);
        req.qp_num = id_priv->qp_num;
        req.qp_type = id_priv->qp_type;
        req.starting_psn = id_priv->seq_num;
        req.responder_resources = conn_param->responder_resources;
        req.initiator_depth = conn_param->initiator_depth;
        req.flow_control = conn_param->flow_control;
        req.retry_count = conn_param->retry_count;
        req.rnr_retry_count = conn_param->rnr_retry_count;
        req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
        req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
        req.max_cm_retries = CMA_MAX_CM_RETRIES;
        req.srq = id_priv->srq ? 1 : 0;

        ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
        if (ret && !IS_ERR(id_priv->cm_id.ib)) {
                ib_destroy_cm_id(id_priv->cm_id.ib);
                id_priv->cm_id.ib = NULL;
        }

        kfree(private_data);
        return ret;
}
static int cma_connect_iw(struct rdma_id_private *id_priv,
                          struct rdma_conn_param *conn_param)
{
        struct iw_cm_id *cm_id;
        struct sockaddr_in *sin;
        int ret;
        struct iw_cm_conn_param iw_param;

        cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
        if (IS_ERR(cm_id)) {
                ret = PTR_ERR(cm_id);
                goto out;
        }

        id_priv->cm_id.iw = cm_id;

        sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
        cm_id->local_addr = *sin;

        sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
        cm_id->remote_addr = *sin;

        ret = cma_modify_qp_rtr(&id_priv->id);
        if (ret)
                goto out;

        iw_param.ord = conn_param->initiator_depth;
        iw_param.ird = conn_param->responder_resources;
        iw_param.private_data = conn_param->private_data;
        iw_param.private_data_len = conn_param->private_data_len;
        if (id_priv->id.qp)
                iw_param.qpn = id_priv->qp_num;
        else
                iw_param.qpn = conn_param->qp_num;
        ret = iw_cm_connect(cm_id, &iw_param);
out:
        if (ret && !IS_ERR(cm_id)) {
                iw_destroy_cm_id(cm_id);
                id_priv->cm_id.iw = NULL;
        }
        return ret;
}
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
                return -EINVAL;

        if (!id->qp) {
                id_priv->qp_num = conn_param->qp_num;
                id_priv->qp_type = conn_param->qp_type;
                id_priv->srq = conn_param->srq;
        }

        switch (rdma_node_get_transport(id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
                ret = cma_connect_ib(id_priv, conn_param);
                break;
        case RDMA_TRANSPORT_IWARP:
                ret = cma_connect_iw(id_priv, conn_param);
                break;
        default:
                ret = -ENOSYS;
                break;
        }
        if (ret)
                goto err;

        return 0;
err:
        cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
        return ret;
}
EXPORT_SYMBOL(rdma_connect);
static int cma_accept_ib(struct rdma_id_private *id_priv,
                         struct rdma_conn_param *conn_param)
{
        struct ib_cm_rep_param rep;
        int ret;

        ret = cma_modify_qp_rtr(&id_priv->id);
        if (ret)
                return ret;

        memset(&rep, 0, sizeof rep);
        rep.qp_num = id_priv->qp_num;
        rep.starting_psn = id_priv->seq_num;
        rep.private_data = conn_param->private_data;
        rep.private_data_len = conn_param->private_data_len;
        rep.responder_resources = conn_param->responder_resources;
        rep.initiator_depth = conn_param->initiator_depth;
        rep.target_ack_delay = CMA_CM_RESPONSE_TIMEOUT;
        rep.failover_accepted = 0;
        rep.flow_control = conn_param->flow_control;
        rep.rnr_retry_count = conn_param->rnr_retry_count;
        rep.srq = id_priv->srq ? 1 : 0;

        return ib_send_cm_rep(id_priv->cm_id.ib, &rep);
}
static int cma_accept_iw(struct rdma_id_private *id_priv,
                         struct rdma_conn_param *conn_param)
{
        struct iw_cm_conn_param iw_param;
        int ret;

        ret = cma_modify_qp_rtr(&id_priv->id);
        if (ret)
                return ret;

        iw_param.ord = conn_param->initiator_depth;
        iw_param.ird = conn_param->responder_resources;
        iw_param.private_data = conn_param->private_data;
        iw_param.private_data_len = conn_param->private_data_len;
        if (id_priv->id.qp) {
                iw_param.qpn = id_priv->qp_num;
        } else
                iw_param.qpn = conn_param->qp_num;

        return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp(id_priv, CMA_CONNECT))
                return -EINVAL;

        if (!id->qp && conn_param) {
                id_priv->qp_num = conn_param->qp_num;
                id_priv->qp_type = conn_param->qp_type;
                id_priv->srq = conn_param->srq;
        }

        switch (rdma_node_get_transport(id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
                if (conn_param)
                        ret = cma_accept_ib(id_priv, conn_param);
                else
                        ret = cma_rep_recv(id_priv);
                break;
        case RDMA_TRANSPORT_IWARP:
                ret = cma_accept_iw(id_priv, conn_param);
                break;
        default:
                ret = -ENOSYS;
                break;
        }

        if (ret)
                goto reject;

        return 0;
reject:
        cma_modify_qp_err(id);
        rdma_reject(id, NULL, 0);
        return ret;
}
EXPORT_SYMBOL(rdma_accept);
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
                u8 private_data_len)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp(id_priv, CMA_CONNECT))
                return -EINVAL;

        switch (rdma_node_get_transport(id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
                ret = ib_send_cm_rej(id_priv->cm_id.ib,
                                     IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
                                     private_data, private_data_len);
                break;
        case RDMA_TRANSPORT_IWARP:
                ret = iw_cm_reject(id_priv->cm_id.iw,
                                   private_data, private_data_len);
                break;
        default:
                ret = -ENOSYS;
                break;
        }
        return ret;
}
EXPORT_SYMBOL(rdma_reject);
int rdma_disconnect(struct rdma_cm_id *id)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp(id_priv, CMA_CONNECT) &&
            !cma_comp(id_priv, CMA_DISCONNECT))
                return -EINVAL;

        switch (rdma_node_get_transport(id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
                ret = cma_modify_qp_err(id);
                if (ret)
                        goto out;
                /* Initiate or respond to a disconnect. */
                if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
                        ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
                break;
        case RDMA_TRANSPORT_IWARP:
                ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
                break;
        default:
                ret = -EINVAL;
                break;
        }
out:
        return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
static void cma_add_one(struct ib_device *device)
{
        struct cma_device *cma_dev;
        struct rdma_id_private *id_priv;

        cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
        if (!cma_dev)
                return;

        cma_dev->device = device;
        cma_dev->node_guid = device->node_guid;
        if (!cma_dev->node_guid)
                goto err;

        init_completion(&cma_dev->comp);
        atomic_set(&cma_dev->refcount, 1);
        INIT_LIST_HEAD(&cma_dev->id_list);
        ib_set_client_data(device, &cma_client, cma_dev);

        mutex_lock(&lock);
        list_add_tail(&cma_dev->list, &dev_list);
        list_for_each_entry(id_priv, &listen_any_list, list)
                cma_listen_on_dev(id_priv, cma_dev);
        mutex_unlock(&lock);
        return;
err:
        kfree(cma_dev);
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
        enum cma_state state;

        /* Record that we want to remove the device */
        state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
        if (state == CMA_DESTROYING)
                return 0;

        cma_cancel_operation(id_priv, state);
        wait_event(id_priv->wait_remove, !atomic_read(&id_priv->dev_remove));

        /* Check for destruction from another callback. */
        if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
                return 0;

        return cma_notify_user(id_priv, RDMA_CM_EVENT_DEVICE_REMOVAL,
                               0, NULL, 0);
}
static void cma_process_remove(struct cma_device *cma_dev)
{
        struct rdma_id_private *id_priv;
        int ret;

        mutex_lock(&lock);
        while (!list_empty(&cma_dev->id_list)) {
                id_priv = list_entry(cma_dev->id_list.next,
                                     struct rdma_id_private, list);

                if (cma_internal_listen(id_priv)) {
                        cma_destroy_listen(id_priv);
                        continue;
                }

                list_del_init(&id_priv->list);
                atomic_inc(&id_priv->refcount);
                mutex_unlock(&lock);

                ret = cma_remove_id_dev(id_priv);
                cma_deref_id(id_priv);
                if (ret)
                        rdma_destroy_id(&id_priv->id);

                mutex_lock(&lock);
        }
        mutex_unlock(&lock);

        cma_deref_dev(cma_dev);
        wait_for_completion(&cma_dev->comp);
}
static void cma_remove_one(struct ib_device *device)
{
        struct cma_device *cma_dev;

        cma_dev = ib_get_client_data(device, &cma_client);
        if (!cma_dev)
                return;

        mutex_lock(&lock);
        list_del(&cma_dev->list);
        mutex_unlock(&lock);

        cma_process_remove(cma_dev);
        kfree(cma_dev);
}
static int cma_init(void)
{
        int ret;

        cma_wq = create_singlethread_workqueue("rdma_cm_wq");
        if (!cma_wq)
                return -ENOMEM;

        ib_sa_register_client(&sa_client);

        ret = ib_register_client(&cma_client);
        if (ret)
                goto err;
        return 0;

err:
        ib_sa_unregister_client(&sa_client);
        destroy_workqueue(cma_wq);
        return ret;
}
static void cma_cleanup(void)
{
        ib_unregister_client(&cma_client);
        ib_sa_unregister_client(&sa_client);
        destroy_workqueue(cma_wq);
        idr_destroy(&sdp_ps);
        idr_destroy(&tcp_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);