/*
 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 *
 * This Software is licensed under one of the following licenses:
 *
 * 1) under the terms of the "Common Public License 1.0" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/cpl.php.
 *
 * 2) under the terms of the "The BSD License" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/bsd-license.php.
 *
 * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
 *    copy of which is available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/gpl-license.php.
 *
 * Licensee has the right to choose one of the above licenses.
 *
 * Redistributions of source code must retain the above copyright
 * notice and one of the license notices.
 *
 * Redistributions in binary form must reproduce both the above copyright
 * notice, one of the license notices in the documentation
 * and/or other materials provided with the distribution.
 */
#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>

#include <net/tcp.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 3
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
        .name   = "cma",
        .add    = cma_add_one,
        .remove = cma_remove_one
};

static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
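
/*
 * Note: sdp_ps and tcp_ps map local port numbers to rdma_bind_list
 * structures, one idr per port space, so SDP and TCP consumers can bind
 * the same port number independently.  All idr lookups and updates are
 * serialized by the global 'lock' mutex.
 */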
struct cma_device {
        struct list_head        list;
        struct ib_device        *device;
        __be64                  node_guid;
        struct completion       comp;
        atomic_t                refcount;
        struct list_head        id_list;
};

enum cma_state {
        CMA_IDLE,
        CMA_ADDR_QUERY,
        CMA_ADDR_RESOLVED,
        CMA_ROUTE_QUERY,
        CMA_ROUTE_RESOLVED,
        CMA_CONNECT,
        CMA_DISCONNECT,
        CMA_ADDR_BOUND,
        CMA_LISTEN,
        CMA_DEVICE_REMOVAL,
        CMA_DESTROYING
};

struct rdma_bind_list {
        struct idr              *ps;
        struct hlist_head       owners;
        unsigned short          port;
};

/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
        struct rdma_cm_id       id;

        struct rdma_bind_list   *bind_list;
        struct hlist_node       node;
        struct list_head        list;
        struct list_head        listen_list;
        struct cma_device       *cma_dev;

        enum cma_state          state;
        spinlock_t              lock;
        struct completion       comp;
        atomic_t                refcount;
        wait_queue_head_t       wait_remove;
        atomic_t                dev_remove;

        int                     backlog;
        struct ib_sa_query      *query;
        int                     query_id;
        union {
                struct ib_cm_id *ib;
        } cm_id;

        u32                     seq_num;
        u32                     qp_num;
        enum ib_qp_type         qp_type;
        u8                      srq;
};

struct cma_work {
        struct work_struct      work;
        struct rdma_id_private  *id;
        enum cma_state          old_state;
        enum cma_state          new_state;
        struct rdma_cm_event    event;
};

union cma_ip_addr {
        struct in6_addr ip6;
        struct {
                __u32 pad[3];
                __u32 addr;
        } ip4;
};

struct cma_hdr {
        u8 cma_version;
        u8 ip_version;  /* IP version: 7:4 */
        __u16 port;
        union cma_ip_addr src_addr;
        union cma_ip_addr dst_addr;
};

struct sdp_hh {
        u8 bsdh[16];
        u8 sdp_version; /* Major version: 7:4 */
        u8 ip_version;  /* IP version: 7:4 */
        u8 sdp_specific1[10];
        __u16 port;
        __u16 sdp_specific2;
        union cma_ip_addr src_addr;
        union cma_ip_addr dst_addr;
};

struct sdp_hah {
        u8 bsdh[16];
        u8 sdp_version;
};

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2
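
/*
 * The cma_comp()/cma_comp_exch()/cma_exch() helpers below implement the id
 * state machine: every test or transition happens under id_priv->lock, so
 * concurrent callbacks (CM events, address resolution, device removal) see
 * a consistent state and at most one of them performs a given transition.
 */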
static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&id_priv->lock, flags);
        ret = (id_priv->state == comp);
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
                         enum cma_state comp, enum cma_state exch)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&id_priv->lock, flags);
        if ((ret = (id_priv->state == comp)))
                id_priv->state = exch;
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}

static enum cma_state cma_exch(struct rdma_id_private *id_priv,
                               enum cma_state exch)
{
        unsigned long flags;
        enum cma_state old;

        spin_lock_irqsave(&id_priv->lock, flags);
        old = id_priv->state;
        id_priv->state = exch;
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return old;
}
static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
        return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
        hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
        return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
        return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
        hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
                              struct cma_device *cma_dev)
{
        atomic_inc(&cma_dev->refcount);
        id_priv->cma_dev = cma_dev;
        id_priv->id.device = cma_dev->device;
        list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
        if (atomic_dec_and_test(&cma_dev->refcount))
                complete(&cma_dev->comp);
}

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
        list_del(&id_priv->list);
        cma_deref_dev(id_priv->cma_dev);
        id_priv->cma_dev = NULL;
}

static int cma_acquire_ib_dev(struct rdma_id_private *id_priv)
{
        struct cma_device *cma_dev;
        union ib_gid *gid;
        int ret = -ENODEV;

        gid = ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr);

        mutex_lock(&lock);
        list_for_each_entry(cma_dev, &dev_list, list) {
                ret = ib_find_cached_gid(cma_dev->device, gid,
                                         &id_priv->id.port_num, NULL);
                if (!ret) {
                        cma_attach_to_dev(id_priv, cma_dev);
                        break;
                }
        }
        mutex_unlock(&lock);
        return ret;
}

static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
        switch (id_priv->id.route.addr.dev_addr.dev_type) {
        case IB_NODE_CA:
                return cma_acquire_ib_dev(id_priv);
        default:
                return -ENODEV;
        }
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
        if (atomic_dec_and_test(&id_priv->refcount))
                complete(&id_priv->comp);
}

static void cma_release_remove(struct rdma_id_private *id_priv)
{
        if (atomic_dec_and_test(&id_priv->dev_remove))
                wake_up(&id_priv->wait_remove);
}
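
/*
 * Usage note: callbacks bump id_priv->dev_remove on entry and drop it
 * through cma_release_remove() on exit.  cma_remove_id_dev() waits for the
 * counter to reach zero before reporting RDMA_CM_EVENT_DEVICE_REMOVAL,
 * which is how removal notification is serialized against callbacks that
 * are already running.
 */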
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
                                  void *context, enum rdma_port_space ps)
{
        struct rdma_id_private *id_priv;

        id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
        if (!id_priv)
                return ERR_PTR(-ENOMEM);

        id_priv->state = CMA_IDLE;
        id_priv->id.context = context;
        id_priv->id.event_handler = event_handler;
        id_priv->id.ps = ps;
        spin_lock_init(&id_priv->lock);
        init_completion(&id_priv->comp);
        atomic_set(&id_priv->refcount, 1);
        init_waitqueue_head(&id_priv->wait_remove);
        atomic_set(&id_priv->dev_remove, 0);
        INIT_LIST_HEAD(&id_priv->listen_list);
        get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

        return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
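
/*
 * Sketch of the active-side call sequence a consumer would use with the
 * functions exported from this file (error handling omitted; 'my_handler',
 * 'my_ctx', 'dst_addr', 'pd', and the attribute setup are hypothetical):
 *
 *      struct rdma_cm_id *id;
 *
 *      id = rdma_create_id(my_handler, my_ctx, RDMA_PS_TCP);
 *      rdma_resolve_addr(id, NULL, dst_addr, 2000);
 *      // wait for RDMA_CM_EVENT_ADDR_RESOLVED in my_handler
 *      rdma_resolve_route(id, 2000);
 *      // wait for RDMA_CM_EVENT_ROUTE_RESOLVED
 *      rdma_create_qp(id, pd, &qp_init_attr);
 *      rdma_connect(id, &conn_param);
 *      // RDMA_CM_EVENT_ESTABLISHED completes the connection
 */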
static int cma_init_ib_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;
        struct rdma_dev_addr *dev_addr;
        int ret;

        dev_addr = &id_priv->id.route.addr.dev_addr;
        ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
                                  ib_addr_get_pkey(dev_addr),
                                  &qp_attr.pkey_index);
        if (ret)
                return ret;

        qp_attr.qp_state = IB_QPS_INIT;
        qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
        qp_attr.port_num = id_priv->id.port_num;
        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
                                          IB_QP_PKEY_INDEX | IB_QP_PORT);
}
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
                   struct ib_qp_init_attr *qp_init_attr)
{
        struct rdma_id_private *id_priv;
        struct ib_qp *qp;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (id->device != pd->device)
                return -EINVAL;

        qp = ib_create_qp(pd, qp_init_attr);
        if (IS_ERR(qp))
                return PTR_ERR(qp);

        switch (id->device->node_type) {
        case IB_NODE_CA:
                ret = cma_init_ib_qp(id_priv, qp);
                break;
        default:
                ret = -ENOSYS;
                break;
        }

        if (ret)
                goto err;

        id->qp = qp;
        id_priv->qp_num = qp->qp_num;
        id_priv->qp_type = qp->qp_type;
        id_priv->srq = (qp->srq != NULL);
        return 0;
err:
        ib_destroy_qp(qp);
        return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
        ib_destroy_qp(id->qp);
}
EXPORT_SYMBOL(rdma_destroy_qp);
static int cma_modify_qp_rtr(struct rdma_cm_id *id)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        if (!id->qp)
                return 0;

        /* Need to update QP attributes from default values. */
        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        ret = ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
        if (ret)
                return ret;

        qp_attr.qp_state = IB_QPS_RTR;
        ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
}

static int cma_modify_qp_rts(struct rdma_cm_id *id)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        if (!id->qp)
                return 0;

        qp_attr.qp_state = IB_QPS_RTS;
        ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
}

static int cma_modify_qp_err(struct rdma_cm_id *id)
{
        struct ib_qp_attr qp_attr;

        if (!id->qp)
                return 0;

        qp_attr.qp_state = IB_QPS_ERR;
        return ib_modify_qp(id->qp, &qp_attr, IB_QP_STATE);
}
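
/*
 * The three helpers above walk a consumer QP through the standard IB state
 * ladder: RESET -> INIT -> RTR during connection establishment, then -> RTS
 * once the handshake completes, and -> ERR to flush outstanding work
 * requests on teardown.  rdma_init_qp_attr() below fills in the
 * device-specific attributes required for each step.
 */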
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
                      int *qp_attr_mask)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        switch (id_priv->id.device->node_type) {
        case IB_NODE_CA:
                ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
                                         qp_attr_mask);
                if (qp_attr->qp_state == IB_QPS_RTR)
                        qp_attr->rq_psn = id_priv->seq_num;
                break;
        default:
                ret = -ENOSYS;
                break;
        }

        return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
static inline int cma_zero_addr(struct sockaddr *addr)
{
        struct in6_addr *ip6;

        if (addr->sa_family == AF_INET)
                return ZERONET(((struct sockaddr_in *) addr)->sin_addr.s_addr);
        else {
                ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
                return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
                        ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
        }
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
        return LOOPBACK(((struct sockaddr_in *) addr)->sin_addr.s_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
        return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static inline int cma_any_port(struct sockaddr *addr)
{
        return !((struct sockaddr_in *) addr)->sin_port;
}
static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
                            u8 *ip_ver, __u16 *port,
                            union cma_ip_addr **src, union cma_ip_addr **dst)
{
        switch (ps) {
        case RDMA_PS_SDP:
                if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
                    SDP_MAJ_VERSION)
                        return -EINVAL;

                *ip_ver = sdp_get_ip_ver(hdr);
                *port   = ((struct sdp_hh *) hdr)->port;
                *src    = &((struct sdp_hh *) hdr)->src_addr;
                *dst    = &((struct sdp_hh *) hdr)->dst_addr;
                break;
        default:
                if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
                        return -EINVAL;

                *ip_ver = cma_get_ip_ver(hdr);
                *port   = ((struct cma_hdr *) hdr)->port;
                *src    = &((struct cma_hdr *) hdr)->src_addr;
                *dst    = &((struct cma_hdr *) hdr)->dst_addr;
                break;
        }

        if (*ip_ver != 4 && *ip_ver != 6)
                return -EINVAL;
        return 0;
}
static void cma_save_net_info(struct rdma_addr *addr,
                              struct rdma_addr *listen_addr,
                              u8 ip_ver, __u16 port,
                              union cma_ip_addr *src, union cma_ip_addr *dst)
{
        struct sockaddr_in *listen4, *ip4;
        struct sockaddr_in6 *listen6, *ip6;

        switch (ip_ver) {
        case 4:
                listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
                ip4 = (struct sockaddr_in *) &addr->src_addr;
                ip4->sin_family = listen4->sin_family;
                ip4->sin_addr.s_addr = dst->ip4.addr;
                ip4->sin_port = listen4->sin_port;

                ip4 = (struct sockaddr_in *) &addr->dst_addr;
                ip4->sin_family = listen4->sin_family;
                ip4->sin_addr.s_addr = src->ip4.addr;
                ip4->sin_port = port;
                break;
        case 6:
                listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
                ip6 = (struct sockaddr_in6 *) &addr->src_addr;
                ip6->sin6_family = listen6->sin6_family;
                ip6->sin6_addr = dst->ip6;
                ip6->sin6_port = listen6->sin6_port;

                ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
                ip6->sin6_family = listen6->sin6_family;
                ip6->sin6_addr = src->ip6;
                ip6->sin6_port = port;
                break;
        default:
                break;
        }
}
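
/*
 * cma_get_net_info() and cma_save_net_info() (de)multiplex the addressing
 * header carried in CM private data: an sdp_hh for RDMA_PS_SDP, a cma_hdr
 * otherwise.  Note that source and destination swap when a request is
 * received: the new id's source address comes from the header's 'dst'
 * field and vice versa, since the header was written from the peer's
 * point of view.
 */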
static inline int cma_user_data_offset(enum rdma_port_space ps)
{
        switch (ps) {
        case RDMA_PS_SDP:
                return 0;
        default:
                return sizeof(struct cma_hdr);
        }
}

static int cma_notify_user(struct rdma_id_private *id_priv,
                           enum rdma_cm_event_type type, int status,
                           void *data, u8 data_len)
{
        struct rdma_cm_event event;

        event.event = type;
        event.status = status;
        event.private_data = data;
        event.private_data_len = data_len;

        return id_priv->id.event_handler(&id_priv->id, &event);
}
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
        switch (id_priv->id.device->node_type) {
        case IB_NODE_CA:
                if (id_priv->query)
                        ib_sa_cancel_query(id_priv->query_id, id_priv->query);
                break;
        default:
                break;
        }
}

static inline int cma_internal_listen(struct rdma_id_private *id_priv)
{
        return (id_priv->state == CMA_LISTEN) && id_priv->cma_dev &&
               cma_any_addr(&id_priv->id.route.addr.src_addr);
}
static void cma_destroy_listen(struct rdma_id_private *id_priv)
{
        cma_exch(id_priv, CMA_DESTROYING);

        if (id_priv->cma_dev) {
                switch (id_priv->id.device->node_type) {
                case IB_NODE_CA:
                        if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
                                ib_destroy_cm_id(id_priv->cm_id.ib);
                        break;
                default:
                        break;
                }
                cma_detach_from_dev(id_priv);
        }
        list_del(&id_priv->listen_list);

        cma_deref_id(id_priv);
        wait_for_completion(&id_priv->comp);

        kfree(id_priv);
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
        struct rdma_id_private *dev_id_priv;

        mutex_lock(&lock);
        list_del(&id_priv->list);

        while (!list_empty(&id_priv->listen_list)) {
                dev_id_priv = list_entry(id_priv->listen_list.next,
                                         struct rdma_id_private, listen_list);
                cma_destroy_listen(dev_id_priv);
        }
        mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
                                 enum cma_state state)
{
        switch (state) {
        case CMA_ADDR_QUERY:
                rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
                break;
        case CMA_ROUTE_QUERY:
                cma_cancel_route(id_priv);
                break;
        case CMA_LISTEN:
                if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
                    !id_priv->cma_dev)
                        cma_cancel_listens(id_priv);
                break;
        default:
                break;
        }
}
static void cma_release_port(struct rdma_id_private *id_priv)
{
        struct rdma_bind_list *bind_list = id_priv->bind_list;

        if (!bind_list)
                return;

        mutex_lock(&lock);
        hlist_del(&id_priv->node);
        if (hlist_empty(&bind_list->owners)) {
                idr_remove(bind_list->ps, bind_list->port);
                kfree(bind_list);
        }
        mutex_unlock(&lock);
}

void rdma_destroy_id(struct rdma_cm_id *id)
{
        struct rdma_id_private *id_priv;
        enum cma_state state;

        id_priv = container_of(id, struct rdma_id_private, id);
        state = cma_exch(id_priv, CMA_DESTROYING);
        cma_cancel_operation(id_priv, state);

        if (id_priv->cma_dev) {
                switch (id->device->node_type) {
                case IB_NODE_CA:
                        if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
                                ib_destroy_cm_id(id_priv->cm_id.ib);
                        break;
                default:
                        break;
                }
                mutex_lock(&lock);
                cma_detach_from_dev(id_priv);
                mutex_unlock(&lock);
        }

        cma_release_port(id_priv);
        cma_deref_id(id_priv);
        wait_for_completion(&id_priv->comp);

        kfree(id_priv->id.route.path_rec);
        kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);
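
/*
 * Usage note: rdma_destroy_id() is synchronous.  It moves the id to
 * CMA_DESTROYING, cancels any outstanding query, releases the bound port,
 * and then blocks on id_priv->comp until the last callback reference is
 * dropped, so it must not be called from the event handler itself; a
 * handler requests destruction by returning a non-zero value instead.
 */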
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
        int ret;

        ret = cma_modify_qp_rtr(&id_priv->id);
        if (ret)
                goto reject;

        ret = cma_modify_qp_rts(&id_priv->id);
        if (ret)
                goto reject;

        ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
        if (ret)
                goto reject;

        return 0;
reject:
        cma_modify_qp_err(&id_priv->id);
        ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
                       NULL, 0, NULL, 0);
        return ret;
}

static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
        if (id_priv->id.ps == RDMA_PS_SDP &&
            sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
            SDP_MAJ_VERSION)
                return -EINVAL;

        return 0;
}

static int cma_rtu_recv(struct rdma_id_private *id_priv)
{
        int ret;

        ret = cma_modify_qp_rts(&id_priv->id);
        if (ret)
                goto reject;

        return 0;
reject:
        cma_modify_qp_err(&id_priv->id);
        ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
                       NULL, 0, NULL, 0);
        return ret;
}
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
        struct rdma_id_private *id_priv = cm_id->context;
        enum rdma_cm_event_type event;
        u8 private_data_len = 0;
        int ret = 0, status = 0;

        atomic_inc(&id_priv->dev_remove);
        if (!cma_comp(id_priv, CMA_CONNECT))
                goto out;

        switch (ib_event->event) {
        case IB_CM_REQ_ERROR:
        case IB_CM_REP_ERROR:
                event = RDMA_CM_EVENT_UNREACHABLE;
                status = -ETIMEDOUT;
                break;
        case IB_CM_REP_RECEIVED:
                status = cma_verify_rep(id_priv, ib_event->private_data);
                if (status)
                        event = RDMA_CM_EVENT_CONNECT_ERROR;
                else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
                        status = cma_rep_recv(id_priv);
                        event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
                                         RDMA_CM_EVENT_ESTABLISHED;
                } else {
                        event = RDMA_CM_EVENT_CONNECT_RESPONSE;
                        private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
                }
                break;
        case IB_CM_RTU_RECEIVED:
                status = cma_rtu_recv(id_priv);
                event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
                                 RDMA_CM_EVENT_ESTABLISHED;
                break;
        case IB_CM_DREQ_ERROR:
                status = -ETIMEDOUT; /* fall through */
        case IB_CM_DREQ_RECEIVED:
        case IB_CM_DREP_RECEIVED:
                if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
                        goto out;
                event = RDMA_CM_EVENT_DISCONNECTED;
                break;
        case IB_CM_TIMEWAIT_EXIT:
        case IB_CM_MRA_RECEIVED:
                /* ignore event */
                goto out;
        case IB_CM_REJ_RECEIVED:
                cma_modify_qp_err(&id_priv->id);
                status = ib_event->param.rej_rcvd.reason;
                event = RDMA_CM_EVENT_REJECTED;
                private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
                break;
        default:
                printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
                       ib_event->event);
                goto out;
        }

        ret = cma_notify_user(id_priv, event, status, ib_event->private_data,
                              private_data_len);
        if (ret) {
                /* Destroy the CM ID by returning a non-zero value. */
                id_priv->cm_id.ib = NULL;
                cma_exch(id_priv, CMA_DESTROYING);
                cma_release_remove(id_priv);
                rdma_destroy_id(&id_priv->id);
                return ret;
        }
out:
        cma_release_remove(id_priv);
        return ret;
}
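
/*
 * cma_ib_handler() translates IB CM events into RDMA CM events roughly as
 * follows: REQ/REP errors -> UNREACHABLE, REP received -> ESTABLISHED (or
 * CONNECT_RESPONSE when the consumer owns the QP transitions), RTU ->
 * ESTABLISHED, DREQ/DREP -> DISCONNECTED, REJ -> REJECTED.  MRA and
 * timewait-exit events are absorbed here and never reach the consumer.
 */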
static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id,
                                          struct ib_cm_event *ib_event)
{
        struct rdma_id_private *id_priv;
        struct rdma_cm_id *id;
        struct rdma_route *rt;
        union cma_ip_addr *src, *dst;
        __u16 port;
        u8 ip_ver;

        id = rdma_create_id(listen_id->event_handler, listen_id->context,
                            listen_id->ps);
        if (IS_ERR(id))
                return NULL;

        rt = &id->route;
        rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
        rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths, GFP_KERNEL);
        if (!rt->path_rec)
                goto err;

        if (cma_get_net_info(ib_event->private_data, listen_id->ps,
                             &ip_ver, &port, &src, &dst))
                goto err;

        cma_save_net_info(&id->route.addr, &listen_id->route.addr,
                          ip_ver, port, src, dst);
        rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
        if (rt->num_paths == 2)
                rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

        ib_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
        ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
        ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
        rt->addr.dev_addr.dev_type = IB_NODE_CA;

        id_priv = container_of(id, struct rdma_id_private, id);
        id_priv->state = CMA_CONNECT;
        return id_priv;
err:
        rdma_destroy_id(id);
        return NULL;
}
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
        struct rdma_id_private *listen_id, *conn_id;
        int offset, ret;

        listen_id = cm_id->context;
        atomic_inc(&listen_id->dev_remove);
        if (!cma_comp(listen_id, CMA_LISTEN)) {
                ret = -ECONNABORTED;
                goto out;
        }

        conn_id = cma_new_id(&listen_id->id, ib_event);
        if (!conn_id) {
                ret = -ENOMEM;
                goto out;
        }

        atomic_inc(&conn_id->dev_remove);
        ret = cma_acquire_ib_dev(conn_id);
        if (ret) {
                ret = -ENODEV;
                cma_release_remove(conn_id);
                rdma_destroy_id(&conn_id->id);
                goto out;
        }

        conn_id->cm_id.ib = cm_id;
        cm_id->context = conn_id;
        cm_id->cm_handler = cma_ib_handler;

        offset = cma_user_data_offset(listen_id->id.ps);
        ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
                              ib_event->private_data + offset,
                              IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
        if (ret) {
                /* Destroy the CM ID by returning a non-zero value. */
                conn_id->cm_id.ib = NULL;
                cma_exch(conn_id, CMA_DESTROYING);
                cma_release_remove(conn_id);
                rdma_destroy_id(&conn_id->id);
        }
out:
        cma_release_remove(listen_id);
        return ret;
}
static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
        return cpu_to_be64(((u64)ps << 16) +
                           be16_to_cpu(((struct sockaddr_in *) addr)->sin_port));
}
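
/*
 * Example: the IB service ID is the port space shifted into the upper bytes
 * plus the host-order IP port.  Assuming RDMA_PS_TCP is 0x0106 as defined
 * in rdma_cm.h, a listen on TCP port 80 maps to service ID
 * 0x0000000001060050 before byte swapping.
 */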
static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
                                 struct ib_cm_compare_data *compare)
{
        struct cma_hdr *cma_data, *cma_mask;
        struct sdp_hh *sdp_data, *sdp_mask;
        __u32 ip4_addr;
        struct in6_addr ip6_addr;

        memset(compare, 0, sizeof *compare);
        cma_data = (void *) compare->data;
        cma_mask = (void *) compare->mask;
        sdp_data = (void *) compare->data;
        sdp_mask = (void *) compare->mask;

        switch (addr->sa_family) {
        case AF_INET:
                ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
                if (ps == RDMA_PS_SDP) {
                        sdp_set_ip_ver(sdp_data, 4);
                        sdp_set_ip_ver(sdp_mask, 0xF);
                        sdp_data->dst_addr.ip4.addr = ip4_addr;
                        sdp_mask->dst_addr.ip4.addr = ~0;
                } else {
                        cma_set_ip_ver(cma_data, 4);
                        cma_set_ip_ver(cma_mask, 0xF);
                        cma_data->dst_addr.ip4.addr = ip4_addr;
                        cma_mask->dst_addr.ip4.addr = ~0;
                }
                break;
        case AF_INET6:
                ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
                if (ps == RDMA_PS_SDP) {
                        sdp_set_ip_ver(sdp_data, 6);
                        sdp_set_ip_ver(sdp_mask, 0xF);
                        sdp_data->dst_addr.ip6 = ip6_addr;
                        memset(&sdp_mask->dst_addr.ip6, 0xFF,
                               sizeof sdp_mask->dst_addr.ip6);
                } else {
                        cma_set_ip_ver(cma_data, 6);
                        cma_set_ip_ver(cma_mask, 0xF);
                        cma_data->dst_addr.ip6 = ip6_addr;
                        memset(&cma_mask->dst_addr.ip6, 0xFF,
                               sizeof cma_mask->dst_addr.ip6);
                }
                break;
        default:
                break;
        }
}
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
        struct ib_cm_compare_data compare_data;
        struct sockaddr *addr;
        __be64 svc_id;
        int ret;

        id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
                                            id_priv);
        if (IS_ERR(id_priv->cm_id.ib))
                return PTR_ERR(id_priv->cm_id.ib);

        addr = &id_priv->id.route.addr.src_addr;
        svc_id = cma_get_service_id(id_priv->id.ps, addr);
        if (cma_any_addr(addr))
                ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
        else {
                cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
                ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
        }

        if (ret) {
                ib_destroy_cm_id(id_priv->cm_id.ib);
                id_priv->cm_id.ib = NULL;
        }

        return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
                              struct rdma_cm_event *event)
{
        struct rdma_id_private *id_priv = id->context;

        id->context = id_priv->id.context;
        id->event_handler = id_priv->id.event_handler;
        return id_priv->id.event_handler(id, event);
}
static void cma_listen_on_dev(struct rdma_id_private *id_priv,
                              struct cma_device *cma_dev)
{
        struct rdma_id_private *dev_id_priv;
        struct rdma_cm_id *id;
        int ret;

        id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
        if (IS_ERR(id))
                return;

        dev_id_priv = container_of(id, struct rdma_id_private, id);

        dev_id_priv->state = CMA_ADDR_BOUND;
        memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
               ip_addr_size(&id_priv->id.route.addr.src_addr));

        cma_attach_to_dev(dev_id_priv, cma_dev);
        list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);

        ret = rdma_listen(id, id_priv->backlog);
        if (ret)
                goto err;

        return;
err:
        cma_destroy_listen(dev_id_priv);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
        struct cma_device *cma_dev;

        mutex_lock(&lock);
        list_add_tail(&id_priv->list, &listen_any_list);
        list_for_each_entry(cma_dev, &dev_list, list)
                cma_listen_on_dev(id_priv, cma_dev);
        mutex_unlock(&lock);
}
static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
{
        struct sockaddr_in addr_in;

        memset(&addr_in, 0, sizeof addr_in);
        addr_in.sin_family = af;
        return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
}
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (id_priv->state == CMA_IDLE) {
                ret = cma_bind_any(id, AF_INET);
                if (ret)
                        return ret;
        }

        if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
                return -EINVAL;

        id_priv->backlog = backlog;
        if (id->device) {
                switch (id->device->node_type) {
                case IB_NODE_CA:
                        ret = cma_ib_listen(id_priv);
                        if (ret)
                                goto err;
                        break;
                default:
                        ret = -ENOSYS;
                        goto err;
                }
        } else
                cma_listen_on_all(id_priv);

        return 0;
err:
        id_priv->backlog = 0;
        cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
        return ret;
}
EXPORT_SYMBOL(rdma_listen);
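
/*
 * Passive-side sketch (error handling omitted; 'my_handler', 'my_ctx', and
 * 'my_sin' are hypothetical consumer names):
 *
 *      id = rdma_create_id(my_handler, my_ctx, RDMA_PS_TCP);
 *      rdma_bind_addr(id, (struct sockaddr *) &my_sin);
 *      rdma_listen(id, 10);
 *      // each RDMA_CM_EVENT_CONNECT_REQUEST delivers a new child id to
 *      // my_handler; call rdma_accept() or rdma_reject() on it.
 *
 * Listening on the wildcard address has no bound device, so an internal
 * listener is created per device and kept in sync as devices come and go.
 */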
static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
                              void *context)
{
        struct cma_work *work = context;
        struct rdma_route *route;

        route = &work->id->id.route;

        if (!status) {
                route->num_paths = 1;
                *route->path_rec = *path_rec;
        } else {
                work->old_state = CMA_ROUTE_QUERY;
                work->new_state = CMA_ADDR_RESOLVED;
                work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
                work->event.status = status;
        }

        queue_work(cma_wq, &work->work);
}
static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
                              struct cma_work *work)
{
        struct rdma_dev_addr *addr = &id_priv->id.route.addr.dev_addr;
        struct ib_sa_path_rec path_rec;

        memset(&path_rec, 0, sizeof path_rec);
        path_rec.sgid = *ib_addr_get_sgid(addr);
        path_rec.dgid = *ib_addr_get_dgid(addr);
        path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
        path_rec.numb_path = 1;

        id_priv->query_id = ib_sa_path_rec_get(id_priv->id.device,
                                id_priv->id.port_num, &path_rec,
                                IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
                                IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH,
                                timeout_ms, GFP_KERNEL,
                                cma_query_handler, work, &id_priv->query);

        return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
static void cma_work_handler(void *data)
{
        struct cma_work *work = data;
        struct rdma_id_private *id_priv = work->id;
        int destroy = 0;

        atomic_inc(&id_priv->dev_remove);
        if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
                goto out;

        if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
                cma_exch(id_priv, CMA_DESTROYING);
                destroy = 1;
        }
out:
        cma_release_remove(id_priv);
        cma_deref_id(id_priv);
        if (destroy)
                rdma_destroy_id(&id_priv->id);
        kfree(work);
}
static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
        struct rdma_route *route = &id_priv->id.route;
        struct cma_work *work;
        int ret;

        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (!work)
                return -ENOMEM;

        work->id = id_priv;
        INIT_WORK(&work->work, cma_work_handler, work);
        work->old_state = CMA_ROUTE_QUERY;
        work->new_state = CMA_ROUTE_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

        route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
        if (!route->path_rec) {
                ret = -ENOMEM;
                goto err1;
        }

        ret = cma_query_ib_route(id_priv, timeout_ms, work);
        if (ret)
                goto err2;

        return 0;
err2:
        kfree(route->path_rec);
        route->path_rec = NULL;
err1:
        kfree(work);
        return ret;
}
int rdma_set_ib_paths(struct rdma_cm_id *id,
                      struct ib_sa_path_rec *path_rec, int num_paths)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
                return -EINVAL;

        id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
        if (!id->route.path_rec) {
                ret = -ENOMEM;
                goto err;
        }

        memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
        return 0;
err:
        cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
        return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
                return -EINVAL;

        atomic_inc(&id_priv->refcount);
        switch (id->device->node_type) {
        case IB_NODE_CA:
                ret = cma_resolve_ib_route(id_priv, timeout_ms);
                break;
        default:
                ret = -ENOSYS;
                break;
        }
        if (ret)
                goto err;

        return 0;
err:
        cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
        cma_deref_id(id_priv);
        return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
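
/*
 * Route resolution is asynchronous: cma_resolve_ib_route() queues an SA
 * path record query, cma_query_handler() runs in the SA callback and
 * copies the path into the route, and cma_work_handler() delivers
 * RDMA_CM_EVENT_ROUTE_RESOLVED (or ROUTE_ERROR) from the rdma_cm_wq
 * workqueue, never from the caller's context.
 */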
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
        struct cma_device *cma_dev;
        struct ib_port_attr port_attr;
        union ib_gid *gid;
        u16 pkey;
        int ret;
        u8 p;

        mutex_lock(&lock);
        list_for_each_entry(cma_dev, &dev_list, list)
                for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
                        if (!ib_query_port(cma_dev->device, p, &port_attr) &&
                            port_attr.state == IB_PORT_ACTIVE)
                                goto port_found;

        if (!list_empty(&dev_list)) {
                p = 1;
                cma_dev = list_entry(dev_list.next, struct cma_device, list);
        } else {
                ret = -ENODEV;
                goto out;
        }

port_found:
        gid = ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr);
        ret = ib_get_cached_gid(cma_dev->device, p, 0, gid);
        if (ret)
                goto out;

        ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
        if (ret)
                goto out;

        ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
        id_priv->id.port_num = p;
        cma_attach_to_dev(id_priv, cma_dev);
out:
        mutex_unlock(&lock);
        return ret;
}
static void addr_handler(int status, struct sockaddr *src_addr,
                         struct rdma_dev_addr *dev_addr, void *context)
{
        struct rdma_id_private *id_priv = context;
        enum rdma_cm_event_type event;

        atomic_inc(&id_priv->dev_remove);
        if (!id_priv->cma_dev && !status)
                status = cma_acquire_dev(id_priv);

        if (status) {
                if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND))
                        goto out;
                event = RDMA_CM_EVENT_ADDR_ERROR;
        } else {
                if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED))
                        goto out;
                memcpy(&id_priv->id.route.addr.src_addr, src_addr,
                       ip_addr_size(src_addr));
                event = RDMA_CM_EVENT_ADDR_RESOLVED;
        }

        if (cma_notify_user(id_priv, event, status, NULL, 0)) {
                cma_exch(id_priv, CMA_DESTROYING);
                cma_release_remove(id_priv);
                cma_deref_id(id_priv);
                rdma_destroy_id(&id_priv->id);
                return;
        }
out:
        cma_release_remove(id_priv);
        cma_deref_id(id_priv);
}
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
        struct cma_work *work;
        struct sockaddr_in *src_in, *dst_in;
        int ret;

        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (!work)
                return -ENOMEM;

        if (!id_priv->cma_dev) {
                ret = cma_bind_loopback(id_priv);
                if (ret)
                        goto err;
        }

        ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr,
                         ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr));

        if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
                src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
                dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
                src_in->sin_family = dst_in->sin_family;
                src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
        }

        work->id = id_priv;
        INIT_WORK(&work->work, cma_work_handler, work);
        work->old_state = CMA_ADDR_QUERY;
        work->new_state = CMA_ADDR_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
        queue_work(cma_wq, &work->work);
        return 0;
err:
        kfree(work);
        return ret;
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
                         struct sockaddr *dst_addr)
{
        if (src_addr && src_addr->sa_family)
                return rdma_bind_addr(id, src_addr);
        else
                return cma_bind_any(id, dst_addr->sa_family);
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
                      struct sockaddr *dst_addr, int timeout_ms)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (id_priv->state == CMA_IDLE) {
                ret = cma_bind_addr(id, src_addr, dst_addr);
                if (ret)
                        return ret;
        }

        if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
                return -EINVAL;

        atomic_inc(&id_priv->refcount);
        memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
        if (cma_any_addr(dst_addr))
                ret = cma_resolve_loopback(id_priv);
        else
                ret = rdma_resolve_ip(&id->route.addr.src_addr, dst_addr,
                                      &id->route.addr.dev_addr,
                                      timeout_ms, addr_handler, id_priv);
        if (ret)
                goto err;

        return 0;
err:
        cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
        cma_deref_id(id_priv);
        return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
static void cma_bind_port(struct rdma_bind_list *bind_list,
                          struct rdma_id_private *id_priv)
{
        struct sockaddr_in *sin;

        sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
        sin->sin_port = htons(bind_list->port);
        id_priv->bind_list = bind_list;
        hlist_add_head(&id_priv->node, &bind_list->owners);
}
static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
                          unsigned short snum)
{
        struct rdma_bind_list *bind_list;
        int port, start, ret;

        bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
        if (!bind_list)
                return -ENOMEM;

        start = snum ? snum : sysctl_local_port_range[0];

        do {
                ret = idr_get_new_above(ps, bind_list, start, &port);
        } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

        if (ret)
                goto err;

        if ((snum && port != snum) ||
            (!snum && port > sysctl_local_port_range[1])) {
                idr_remove(ps, port);
                ret = -EADDRNOTAVAIL;
                goto err;
        }

        bind_list->ps = ps;
        bind_list->port = (unsigned short) port;
        cma_bind_port(bind_list, id_priv);
        return 0;
err:
        kfree(bind_list);
        return ret;
}
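
/*
 * Port selection mirrors TCP ephemeral binding: when a specific snum is
 * requested, the idr must hand back exactly that port; an anonymous bind
 * may take any free port between sysctl_local_port_range[0] and [1].
 * Anything outside that window fails with -EADDRNOTAVAIL.
 */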
static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
        struct rdma_id_private *cur_id;
        struct sockaddr_in *sin, *cur_sin;
        struct rdma_bind_list *bind_list;
        struct hlist_node *node;
        unsigned short snum;

        sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
        snum = ntohs(sin->sin_port);
        if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
                return -EACCES;

        bind_list = idr_find(ps, snum);
        if (!bind_list)
                return cma_alloc_port(ps, id_priv, snum);

        /*
         * We don't support binding to any address if anyone is bound to
         * a specific address on the same port.
         */
        if (cma_any_addr(&id_priv->id.route.addr.src_addr))
                return -EADDRNOTAVAIL;

        hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
                if (cma_any_addr(&cur_id->id.route.addr.src_addr))
                        return -EADDRNOTAVAIL;

                cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
                if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
                        return -EADDRINUSE;
        }

        cma_bind_port(bind_list, id_priv);
        return 0;
}
static int cma_get_port(struct rdma_id_private *id_priv)
{
        struct idr *ps;
        int ret;

        switch (id_priv->id.ps) {
        case RDMA_PS_SDP:
                ps = &sdp_ps;
                break;
        case RDMA_PS_TCP:
                ps = &tcp_ps;
                break;
        default:
                return -EPROTONOSUPPORT;
        }

        mutex_lock(&lock);
        if (cma_any_port(&id_priv->id.route.addr.src_addr))
                ret = cma_alloc_port(ps, id_priv, 0);
        else
                ret = cma_use_port(ps, id_priv);
        mutex_unlock(&lock);

        return ret;
}
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
        struct rdma_id_private *id_priv;
        int ret;

        if (addr->sa_family != AF_INET)
                return -EAFNOSUPPORT;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
                return -EINVAL;

        if (!cma_any_addr(addr)) {
                ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
                if (!ret)
                        ret = cma_acquire_dev(id_priv);
                if (ret)
                        goto err;
        }

        memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
        ret = cma_get_port(id_priv);
        if (ret)
                goto err;

        return 0;
err:
        cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
        return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
                          struct rdma_route *route)
{
        struct sockaddr_in *src4, *dst4;
        struct cma_hdr *cma_hdr;
        struct sdp_hh *sdp_hdr;

        src4 = (struct sockaddr_in *) &route->addr.src_addr;
        dst4 = (struct sockaddr_in *) &route->addr.dst_addr;

        switch (ps) {
        case RDMA_PS_SDP:
                sdp_hdr = hdr;
                if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
                        return -EINVAL;
                sdp_set_ip_ver(sdp_hdr, 4);
                sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
                sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
                sdp_hdr->port = src4->sin_port;
                break;
        default:
                cma_hdr = hdr;
                cma_hdr->cma_version = CMA_VERSION;
                cma_set_ip_ver(cma_hdr, 4);
                cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
                cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
                cma_hdr->port = src4->sin_port;
                break;
        }
        return 0;
}
static int cma_connect_ib(struct rdma_id_private *id_priv,
                          struct rdma_conn_param *conn_param)
{
        struct ib_cm_req_param req;
        struct rdma_route *route;
        void *private_data;
        int offset, ret;

        memset(&req, 0, sizeof req);
        offset = cma_user_data_offset(id_priv->id.ps);
        req.private_data_len = offset + conn_param->private_data_len;
        private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
        if (!private_data)
                return -ENOMEM;

        if (conn_param->private_data && conn_param->private_data_len)
                memcpy(private_data + offset, conn_param->private_data,
                       conn_param->private_data_len);

        id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
                                            id_priv);
        if (IS_ERR(id_priv->cm_id.ib)) {
                ret = PTR_ERR(id_priv->cm_id.ib);
                goto out;
        }

        route = &id_priv->id.route;
        ret = cma_format_hdr(private_data, id_priv->id.ps, route);
        if (ret)
                goto out;
        req.private_data = private_data;

        req.primary_path = &route->path_rec[0];
        if (route->num_paths == 2)
                req.alternate_path = &route->path_rec[1];

        req.service_id = cma_get_service_id(id_priv->id.ps,
                                            &route->addr.dst_addr);
        req.qp_num = id_priv->qp_num;
        req.qp_type = id_priv->qp_type;
        req.starting_psn = id_priv->seq_num;
        req.responder_resources = conn_param->responder_resources;
        req.initiator_depth = conn_param->initiator_depth;
        req.flow_control = conn_param->flow_control;
        req.retry_count = conn_param->retry_count;
        req.rnr_retry_count = conn_param->rnr_retry_count;
        req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
        req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
        req.max_cm_retries = CMA_MAX_CM_RETRIES;
        req.srq = id_priv->srq ? 1 : 0;

        ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
        kfree(private_data);
        return ret;
}
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
                return -EINVAL;

        if (!id->qp) {
                id_priv->qp_num = conn_param->qp_num;
                id_priv->qp_type = conn_param->qp_type;
                id_priv->srq = conn_param->srq;
        }

        switch (id->device->node_type) {
        case IB_NODE_CA:
                ret = cma_connect_ib(id_priv, conn_param);
                break;
        default:
                ret = -ENOSYS;
                break;
        }
        if (ret)
                goto err;

        return 0;
err:
        cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
        return ret;
}
EXPORT_SYMBOL(rdma_connect);
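
/*
 * Usage note: rdma_connect() requires a resolved route.  If the consumer
 * created the QP through rdma_create_qp(), qp_num/qp_type/srq were captured
 * there; otherwise they must be supplied in conn_param.  The REQ carries
 * the cma/sdp addressing header built by cma_format_hdr() ahead of the
 * consumer's private data.
 */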
static int cma_accept_ib(struct rdma_id_private *id_priv,
                         struct rdma_conn_param *conn_param)
{
        struct ib_cm_rep_param rep;
        int ret;

        ret = cma_modify_qp_rtr(&id_priv->id);
        if (ret)
                return ret;

        memset(&rep, 0, sizeof rep);
        rep.qp_num = id_priv->qp_num;
        rep.starting_psn = id_priv->seq_num;
        rep.private_data = conn_param->private_data;
        rep.private_data_len = conn_param->private_data_len;
        rep.responder_resources = conn_param->responder_resources;
        rep.initiator_depth = conn_param->initiator_depth;
        rep.target_ack_delay = CMA_CM_RESPONSE_TIMEOUT;
        rep.failover_accepted = 0;
        rep.flow_control = conn_param->flow_control;
        rep.rnr_retry_count = conn_param->rnr_retry_count;
        rep.srq = id_priv->srq ? 1 : 0;

        return ib_send_cm_rep(id_priv->cm_id.ib, &rep);
}
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp(id_priv, CMA_CONNECT))
                return -EINVAL;

        if (!id->qp && conn_param) {
                id_priv->qp_num = conn_param->qp_num;
                id_priv->qp_type = conn_param->qp_type;
                id_priv->srq = conn_param->srq;
        }

        switch (id->device->node_type) {
        case IB_NODE_CA:
                if (conn_param)
                        ret = cma_accept_ib(id_priv, conn_param);
                else
                        ret = cma_rep_recv(id_priv);
                break;
        default:
                ret = -ENOSYS;
                break;
        }

        if (ret)
                goto reject;

        return 0;
reject:
        cma_modify_qp_err(id);
        rdma_reject(id, NULL, 0);
        return ret;
}
EXPORT_SYMBOL(rdma_accept);
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
                u8 private_data_len)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp(id_priv, CMA_CONNECT))
                return -EINVAL;

        switch (id->device->node_type) {
        case IB_NODE_CA:
                ret = ib_send_cm_rej(id_priv->cm_id.ib,
                                     IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
                                     private_data, private_data_len);
                break;
        default:
                ret = -ENOSYS;
                break;
        }
        return ret;
}
EXPORT_SYMBOL(rdma_reject);
int rdma_disconnect(struct rdma_cm_id *id)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp(id_priv, CMA_CONNECT) &&
            !cma_comp(id_priv, CMA_DISCONNECT))
                return -EINVAL;

        ret = cma_modify_qp_err(id);
        if (ret)
                goto out;

        switch (id->device->node_type) {
        case IB_NODE_CA:
                /* Initiate or respond to a disconnect. */
                if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
                        ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
                break;
        default:
                break;
        }
out:
        return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
static void cma_add_one(struct ib_device *device)
{
        struct cma_device *cma_dev;
        struct rdma_id_private *id_priv;

        cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
        if (!cma_dev)
                return;

        cma_dev->device = device;
        cma_dev->node_guid = device->node_guid;
        if (!cma_dev->node_guid)
                goto err;

        init_completion(&cma_dev->comp);
        atomic_set(&cma_dev->refcount, 1);
        INIT_LIST_HEAD(&cma_dev->id_list);
        ib_set_client_data(device, &cma_client, cma_dev);

        mutex_lock(&lock);
        list_add_tail(&cma_dev->list, &dev_list);
        list_for_each_entry(id_priv, &listen_any_list, list)
                cma_listen_on_dev(id_priv, cma_dev);
        mutex_unlock(&lock);
        return;
err:
        kfree(cma_dev);
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
        enum cma_state state;

        /* Record that we want to remove the device */
        state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
        if (state == CMA_DESTROYING)
                return 0;

        cma_cancel_operation(id_priv, state);
        wait_event(id_priv->wait_remove, !atomic_read(&id_priv->dev_remove));

        /* Check for destruction from another callback. */
        if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
                return 0;

        return cma_notify_user(id_priv, RDMA_CM_EVENT_DEVICE_REMOVAL,
                               0, NULL, 0);
}

static void cma_process_remove(struct cma_device *cma_dev)
{
        struct list_head remove_list;
        struct rdma_id_private *id_priv;
        int ret;

        INIT_LIST_HEAD(&remove_list);

        mutex_lock(&lock);
        while (!list_empty(&cma_dev->id_list)) {
                id_priv = list_entry(cma_dev->id_list.next,
                                     struct rdma_id_private, list);

                if (cma_internal_listen(id_priv)) {
                        cma_destroy_listen(id_priv);
                        continue;
                }

                list_del(&id_priv->list);
                list_add_tail(&id_priv->list, &remove_list);
                atomic_inc(&id_priv->refcount);
                mutex_unlock(&lock);

                ret = cma_remove_id_dev(id_priv);
                cma_deref_id(id_priv);
                if (ret)
                        rdma_destroy_id(&id_priv->id);

                mutex_lock(&lock);
        }
        mutex_unlock(&lock);

        cma_deref_dev(cma_dev);
        wait_for_completion(&cma_dev->comp);
}
static void cma_remove_one(struct ib_device *device)
{
        struct cma_device *cma_dev;

        cma_dev = ib_get_client_data(device, &cma_client);
        if (!cma_dev)
                return;

        mutex_lock(&lock);
        list_del(&cma_dev->list);
        mutex_unlock(&lock);

        cma_process_remove(cma_dev);
        kfree(cma_dev);
}

static int cma_init(void)
{
        int ret;

        cma_wq = create_singlethread_workqueue("rdma_cm_wq");
        if (!cma_wq)
                return -ENOMEM;

        ret = ib_register_client(&cma_client);
        if (ret)
                goto err;
        return 0;

err:
        destroy_workqueue(cma_wq);
        return ret;
}

static void cma_cleanup(void)
{
        ib_unregister_client(&cma_client);
        destroy_workqueue(cma_wq);
        idr_destroy(&sdp_ps);
        idr_destroy(&tcp_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);