/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/igmp.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip_fib.h>
#include <net/ip6_route.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

#include "core_priv.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18
static const char * const cma_events[] = {
	[RDMA_CM_EVENT_ADDR_RESOLVED]	 = "address resolved",
	[RDMA_CM_EVENT_ADDR_ERROR]	 = "address error",
	[RDMA_CM_EVENT_ROUTE_RESOLVED]	 = "route resolved",
	[RDMA_CM_EVENT_ROUTE_ERROR]	 = "route error",
	[RDMA_CM_EVENT_CONNECT_REQUEST]	 = "connect request",
	[RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
	[RDMA_CM_EVENT_CONNECT_ERROR]	 = "connect error",
	[RDMA_CM_EVENT_UNREACHABLE]	 = "unreachable",
	[RDMA_CM_EVENT_REJECTED]	 = "rejected",
	[RDMA_CM_EVENT_ESTABLISHED]	 = "established",
	[RDMA_CM_EVENT_DISCONNECTED]	 = "disconnected",
	[RDMA_CM_EVENT_DEVICE_REMOVAL]	 = "device removal",
	[RDMA_CM_EVENT_MULTICAST_JOIN]	 = "multicast join",
	[RDMA_CM_EVENT_MULTICAST_ERROR]	 = "multicast error",
	[RDMA_CM_EVENT_ADDR_CHANGE]	 = "address change",
	[RDMA_CM_EVENT_TIMEWAIT_EXIT]	 = "timewait exit",
};

const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
	int index = event;

	return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
			cma_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(rdma_event_msg);
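
/*
 * Usage sketch (illustrative only, not part of the original file): ULP
 * event handlers commonly log events through this helper; "my_handler"
 * below is a hypothetical consumer callback.
 *
 *	static int my_handler(struct rdma_cm_id *id, struct rdma_cm_event *ev)
 *	{
 *		pr_debug("cm event %s, status %d\n",
 *			 rdma_event_msg(ev->event), ev->status);
 *		return 0;
 *	}
 */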
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static int cma_pernet_id;
struct cma_pernet {
	struct idr tcp_ps;
	struct idr udp_ps;
	struct idr ipoib_ps;
	struct idr ib_ps;
};

static struct cma_pernet *cma_pernet(struct net *net)
{
	return net_generic(net, cma_pernet_id);
}

static struct idr *cma_pernet_idr(struct net *net, enum rdma_port_space ps)
{
	struct cma_pernet *pernet = cma_pernet(net);

	switch (ps) {
	case RDMA_PS_TCP:
		return &pernet->tcp_ps;
	case RDMA_PS_UDP:
		return &pernet->udp_ps;
	case RDMA_PS_IPOIB:
		return &pernet->ipoib_ps;
	case RDMA_PS_IB:
		return &pernet->ib_ps;
	default:
		return NULL;
	}
}
struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
	enum ib_gid_type	*default_gid_type;
};

struct rdma_bind_list {
	enum rdma_port_space	ps;
	struct hlist_head	owners;
	unsigned short		port;
};
static int cma_ps_alloc(struct net *net, enum rdma_port_space ps,
			struct rdma_bind_list *bind_list, int snum)
{
	struct idr *idr = cma_pernet_idr(net, ps);

	return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL);
}

static struct rdma_bind_list *cma_ps_find(struct net *net,
					  enum rdma_port_space ps, int snum)
{
	struct idr *idr = cma_pernet_idr(net, ps);

	return idr_find(idr, snum);
}

static void cma_ps_remove(struct net *net, enum rdma_port_space ps, int snum)
{
	struct idr *idr = cma_pernet_idr(net, ps);

	idr_remove(idr, snum);
}
void cma_ref_dev(struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
}

struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
					     void *cookie)
{
	struct cma_device *cma_dev;
	struct cma_device *found_cma_dev = NULL;

	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list)
		if (filter(cma_dev->device, cookie)) {
			found_cma_dev = cma_dev;
			break;
		}

	if (found_cma_dev)
		cma_ref_dev(found_cma_dev);
	mutex_unlock(&lock);
	return found_cma_dev;
}
int cma_get_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port)
{
	if (port < rdma_start_port(cma_dev->device) ||
	    port > rdma_end_port(cma_dev->device))
		return -EINVAL;

	return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port,
			     enum ib_gid_type default_gid_type)
{
	unsigned long supported_gids;

	if (port < rdma_start_port(cma_dev->device) ||
	    port > rdma_end_port(cma_dev->device))
		return -EINVAL;

	supported_gids = roce_gid_type_mask_support(cma_dev->device, port);

	if (!(supported_gids & 1 << default_gid_type))
		return -EINVAL;

	cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
		default_gid_type;

	return 0;
}

struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
{
	return cma_dev->device;
}
/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list; /* listen_any_list or cma_device.list */
	struct list_head	listen_list; /* per device listens */
	struct cma_device	*cma_dev;
	struct list_head	mc_list;

	int			internal_id;
	enum rdma_cm_state	state;
	spinlock_t		lock;
	struct mutex		qp_mutex;

	struct completion	comp;
	atomic_t		refcount;
	struct mutex		handler_mutex;

	int			backlog;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qkey;
	u32			qp_num;
	pid_t			owner;
	u8			srq;
	u8			tos;
	u8			afonly;
	enum ib_gid_type	gid_type;
};
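
/*
 * Illustrative sketch (not part of the original file) of the serialization
 * scheme described above: every callback runs with handler_mutex held, so
 * rdma_destroy_id() can take and release the same mutex to drain any
 * in-flight callback before tearing the id down.
 *
 *	mutex_lock(&id_priv->handler_mutex);	// callback path
 *	// deliver event; may observe RDMA_CM_DESTROYING and abort
 *	mutex_unlock(&id_priv->handler_mutex);
 */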
struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;
	void			*context;
	struct sockaddr_storage	addr;
	struct kref		mcref;
	bool			igmp_joined;
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum rdma_cm_state	old_state;
	enum rdma_cm_state	new_state;
	struct rdma_cm_event	event;
};

struct cma_ndev_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct rdma_cm_event	event;
};

struct iboe_mcast_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct cma_multicast	*mc;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

#define CMA_VERSION 0x00

struct cma_req_info {
	struct ib_device *device;
	int port;
	union ib_gid local_gid;
	__be64 service_id;
	u16 pkey;
	bool has_gid;
};
static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum rdma_cm_state comp, enum rdma_cm_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
				   enum rdma_cm_state exch)
{
	unsigned long flags;
	enum rdma_cm_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}
static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}
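
/*
 * Worked example (added for illustration): cma_set_ip_ver(hdr, 4) on a
 * zeroed header stores 0x40 in hdr->ip_version, so cma_get_ip_ver()
 * returns 4 while the low nibble remains untouched.
 */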
static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
{
	struct in_device *in_dev = NULL;

	if (ndev) {
		rtnl_lock();
		in_dev = __in_dev_get_rtnl(ndev);
		if (in_dev) {
			if (join)
				ip_mc_inc_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
			else
				ip_mc_dec_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
		}
		rtnl_unlock();
	}
	return (in_dev) ? 0 : -ENODEV;
}
static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
			       struct cma_device *cma_dev)
{
	cma_ref_dev(cma_dev);
	id_priv->cma_dev = cma_dev;
	id_priv->gid_type = 0;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	_cma_attach_to_dev(id_priv, cma_dev);
	id_priv->gid_type =
		cma_dev->default_gid_type[id_priv->id.port_num -
					  rdma_start_port(cma_dev->device)];
}

void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}
static inline void release_mc(struct kref *kref)
{
	struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);

	kfree(mc->multicast.ib);
	kfree(mc);
}

static void cma_release_dev(struct rdma_id_private *id_priv)
{
	mutex_lock(&lock);
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
	mutex_unlock(&lock);
}
static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
}

static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
}

static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
	return id_priv->id.route.addr.src_addr.ss_family;
}
static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	if (id_priv->qkey) {
		if (qkey && id_priv->qkey != qkey)
			return -EINVAL;
		return 0;
	}

	if (qkey) {
		id_priv->qkey = qkey;
		return 0;
	}

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
	case RDMA_PS_IB:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}
static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
	dev_addr->dev_type = ARPHRD_INFINIBAND;
	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
	ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
}

static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
	int ret = 0;

	if (addr->sa_family != AF_IB) {
		ret = rdma_translate_ip(addr, dev_addr, NULL);
	} else {
		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
	}

	return ret;
}
static inline int cma_validate_port(struct ib_device *device, u8 port,
				    enum ib_gid_type gid_type,
				    union ib_gid *gid, int dev_type,
				    int bound_if_index)
{
	int ret = -ENODEV;
	struct net_device *ndev = NULL;

	if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
		return ret;

	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
		return ret;

	if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
		ndev = dev_get_by_index(&init_net, bound_if_index);
		if (ndev && ndev->flags & IFF_LOOPBACK) {
			pr_info("detected loopback device\n");
			dev_put(ndev);

			if (!device->get_netdev)
				return -EOPNOTSUPP;

			ndev = device->get_netdev(device, port);
			if (!ndev)
				return -ENODEV;
		}
	} else {
		gid_type = IB_GID_TYPE_IB;
	}

	ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
					 ndev, NULL);

	if (ndev)
		dev_put(ndev);

	return ret;
}
static int cma_acquire_dev(struct rdma_id_private *id_priv,
			   struct rdma_id_private *listen_id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid, iboe_gid, *gidp;
	int ret = -ENODEV;
	u8 port;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	mutex_lock(&lock);
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &iboe_gid);

	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof gid);

	if (listen_id_priv) {
		cma_dev = listen_id_priv->cma_dev;
		port = listen_id_priv->id.port_num;
		gidp = rdma_protocol_roce(cma_dev->device, port) ?
		       &iboe_gid : &gid;

		ret = cma_validate_port(cma_dev->device, port,
					rdma_protocol_ib(cma_dev->device, port) ?
					IB_GID_TYPE_IB :
					listen_id_priv->gid_type, gidp,
					dev_addr->dev_type,
					dev_addr->bound_dev_if);
		if (!ret) {
			id_priv->id.port_num = port;
			goto out;
		}
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
			if (listen_id_priv &&
			    listen_id_priv->cma_dev == cma_dev &&
			    listen_id_priv->id.port_num == port)
				continue;

			gidp = rdma_protocol_roce(cma_dev->device, port) ?
			       &iboe_gid : &gid;

			ret = cma_validate_port(cma_dev->device, port,
						rdma_protocol_ib(cma_dev->device, port) ?
						IB_GID_TYPE_IB :
						cma_dev->default_gid_type[port - 1],
						gidp, dev_addr->dev_type,
						dev_addr->bound_dev_if);
			if (!ret) {
				id_priv->id.port_num = port;
				goto out;
			}
		}
	}

out:
	if (!ret)
		cma_attach_to_dev(id_priv, cma_dev);

	mutex_unlock(&lock);
	return ret;
}
/*
 * Select the source IB device and address to reach the destination IB address.
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct sockaddr_ib *addr;
	union ib_gid gid, sgid, *dgid;
	u16 pkey, index;
	u8 p;
	int i;

	cma_dev = NULL;
	addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
	dgid = (union ib_gid *) &addr->sib_addr;
	pkey = ntohs(addr->sib_pkey);

	list_for_each_entry(cur_dev, &dev_list, list) {
		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (!rdma_cap_af_ib(cur_dev->device, p))
				continue;

			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
				continue;

			for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i,
						       &gid, NULL);
			     i++) {
				if (!memcmp(&gid, dgid, sizeof(gid))) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
					goto found;
				}

				if (!cma_dev && (gid.global.subnet_prefix ==
						 dgid->global.subnet_prefix)) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
				}
			}
		}
	}

	if (!cma_dev)
		return -ENODEV;

found:
	cma_attach_to_dev(id_priv, cma_dev);
	addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
	memcpy(&addr->sib_addr, &sgid, sizeof sgid);
	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
	return 0;
}
static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}
struct rdma_cm_id *rdma_create_id(struct net *net,
				  rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps,
				  enum ib_qp_type qp_type)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->owner = task_pid_nr(current);
	id_priv->state = RDMA_CM_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	id_priv->id.qp_type = qp_type;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
	id_priv->id.route.addr.dev_addr.net = get_net(net);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
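
/*
 * Usage sketch (illustrative, not part of the original file): a consumer
 * creates an id in the RDMA_CM_IDLE state and owns it until
 * rdma_destroy_id(); "my_handler" and "my_ctx" are hypothetical names.
 *
 *	struct rdma_cm_id *id;
 *
 *	id = rdma_create_id(&init_net, my_handler, my_ctx,
 *			    RDMA_PS_TCP, IB_QPT_RC);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 */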
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}
static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp_init_attr->port_num = id->port_num;
	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (id->qp_type == IB_QPT_UD)
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);
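
/*
 * Usage sketch (illustrative only): a consumer typically allocates the QP
 * against its own PD and CQs once connection setup begins; the attribute
 * values and the send_cq/recv_cq/pd names below are hypothetical.
 *
 *	struct ib_qp_init_attr attr = {
 *		.send_cq = send_cq, .recv_cq = recv_cq,
 *		.cap = { .max_send_wr = 16, .max_recv_wr = 16,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.qp_type = IB_QPT_RC, .sq_sig_type = IB_SIGNAL_REQ_WR,
 *	};
 *	ret = rdma_create_qp(id, pd, &attr);
 */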
void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	union ib_gid sgid;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
			   qp_attr.ah_attr.grh.sgid_index, &sgid, NULL);
	if (ret)
		goto out;

	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;
	u16 pkey;

	if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
		pkey = 0xffff;
	else
		pkey = ib_addr_get_pkey(dev_addr);

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (id_priv->id.qp_type == IB_QPT_UD) {
		ret = cma_set_qkey(id_priv, 0);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);

		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
	} else
		ret = -ENOSYS;

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
static inline int cma_zero_addr(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
	case AF_IB:
		return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
	default:
		return 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
	case AF_IB:
		return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
	default:
		return 0;
	}
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}
static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
{
	if (src->sa_family != dst->sa_family)
		return -1;

	switch (src->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
		       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
	case AF_INET6:
		return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
				     &((struct sockaddr_in6 *) dst)->sin6_addr);
	default:
		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
				   &((struct sockaddr_ib *) dst)->sib_addr);
	}
}
static __be16 cma_port(struct sockaddr *addr)
{
	struct sockaddr_ib *sib;

	switch (addr->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) addr)->sin_port;
	case AF_INET6:
		return ((struct sockaddr_in6 *) addr)->sin6_port;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		return htons((u16) (be64_to_cpu(sib->sib_sid) &
				    be64_to_cpu(sib->sib_sid_mask)));
	default:
		return 0;
	}
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}
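
/*
 * Worked example (added for illustration): for an AF_IB address with
 * sib_sid 0x0000000001064853 and an all-ones sib_sid_mask, cma_port()
 * masks the SID, truncates to the low 16 bits and returns htons(0x4853),
 * i.e. port 18515.
 */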
static void cma_save_ib_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     struct rdma_cm_id *listen_id,
			     struct ib_sa_path_rec *path)
{
	struct sockaddr_ib *listen_ib, *ib;

	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
	if (src_addr) {
		ib = (struct sockaddr_ib *)src_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->sgid, 16);
			ib->sib_sid = path->service_id;
			ib->sib_scope_id = 0;
		} else {
			ib->sib_pkey = listen_ib->sib_pkey;
			ib->sib_flowinfo = listen_ib->sib_flowinfo;
			ib->sib_addr = listen_ib->sib_addr;
			ib->sib_sid = listen_ib->sib_sid;
			ib->sib_scope_id = listen_ib->sib_scope_id;
		}
		ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
	}
	if (dst_addr) {
		ib = (struct sockaddr_ib *)dst_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->dgid, 16);
		}
	}
}
static void cma_save_ip4_info(struct sockaddr *src_addr,
			      struct sockaddr *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{
	struct sockaddr_in *ip4;

	if (src_addr) {
		ip4 = (struct sockaddr_in *)src_addr;
		ip4->sin_family = AF_INET;
		ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
		ip4->sin_port = local_port;
	}

	if (dst_addr) {
		ip4 = (struct sockaddr_in *)dst_addr;
		ip4->sin_family = AF_INET;
		ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
		ip4->sin_port = hdr->port;
	}
}

static void cma_save_ip6_info(struct sockaddr *src_addr,
			      struct sockaddr *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{
	struct sockaddr_in6 *ip6;

	if (src_addr) {
		ip6 = (struct sockaddr_in6 *)src_addr;
		ip6->sin6_family = AF_INET6;
		ip6->sin6_addr = hdr->dst_addr.ip6;
		ip6->sin6_port = local_port;
	}

	if (dst_addr) {
		ip6 = (struct sockaddr_in6 *)dst_addr;
		ip6->sin6_family = AF_INET6;
		ip6->sin6_addr = hdr->src_addr.ip6;
		ip6->sin6_port = hdr->port;
	}
}
static u16 cma_port_from_service_id(__be64 service_id)
{
	return (u16)be64_to_cpu(service_id);
}

static int cma_save_ip_info(struct sockaddr *src_addr,
			    struct sockaddr *dst_addr,
			    struct ib_cm_event *ib_event,
			    __be64 service_id)
{
	struct cma_hdr *hdr;
	__be16 port;

	hdr = ib_event->private_data;
	if (hdr->cma_version != CMA_VERSION)
		return -EINVAL;

	port = htons(cma_port_from_service_id(service_id));

	switch (cma_get_ip_ver(hdr)) {
	case 4:
		cma_save_ip4_info(src_addr, dst_addr, hdr, port);
		break;
	case 6:
		cma_save_ip6_info(src_addr, dst_addr, hdr, port);
		break;
	default:
		return -EAFNOSUPPORT;
	}

	return 0;
}
static int cma_save_net_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     struct rdma_cm_id *listen_id,
			     struct ib_cm_event *ib_event,
			     sa_family_t sa_family, __be64 service_id)
{
	if (sa_family == AF_IB) {
		if (ib_event->event == IB_CM_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id,
					 ib_event->param.req_rcvd.primary_path);
		else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
		return 0;
	}

	return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
}
static int cma_save_req_info(const struct ib_cm_event *ib_event,
			     struct cma_req_info *req)
{
	const struct ib_cm_req_event_param *req_param =
		&ib_event->param.req_rcvd;
	const struct ib_cm_sidr_req_event_param *sidr_param =
		&ib_event->param.sidr_req_rcvd;

	switch (ib_event->event) {
	case IB_CM_REQ_RECEIVED:
		req->device	= req_param->listen_id->device;
		req->port	= req_param->port;
		memcpy(&req->local_gid, &req_param->primary_path->sgid,
		       sizeof(req->local_gid));
		req->has_gid	= true;
		req->service_id	= req_param->primary_path->service_id;
		req->pkey	= be16_to_cpu(req_param->primary_path->pkey);
		if (req->pkey != req_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
					    "RDMA CMA: in the future this may cause the request to be dropped\n",
					    req_param->bth_pkey, req->pkey);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		req->device	= sidr_param->listen_id->device;
		req->port	= sidr_param->port;
		req->has_gid	= false;
		req->service_id	= sidr_param->service_id;
		req->pkey	= sidr_param->pkey;
		if (req->pkey != sidr_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
					    "RDMA CMA: in the future this may cause the request to be dropped\n",
					    sidr_param->bth_pkey, req->pkey);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static bool validate_ipv4_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in *dst_addr,
				  const struct sockaddr_in *src_addr)
{
	__be32 daddr = dst_addr->sin_addr.s_addr,
	       saddr = src_addr->sin_addr.s_addr;
	struct fib_result res;
	struct flowi4 fl4;
	int err;
	bool ret;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
	    ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
	    ipv4_is_loopback(saddr))
		return false;

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_iif = net_dev->ifindex;
	fl4.daddr = daddr;
	fl4.saddr = saddr;

	rcu_read_lock();
	err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
	ret = err == 0 && FIB_RES_DEV(res) == net_dev;
	rcu_read_unlock();

	return ret;
}

static bool validate_ipv6_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in6 *dst_addr,
				  const struct sockaddr_in6 *src_addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
			   IPV6_ADDR_LINKLOCAL;
	struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
					 &src_addr->sin6_addr, net_dev->ifindex,
					 strict);
	bool ret;

	if (!rt)
		return false;

	ret = rt->rt6i_idev->dev == net_dev;
	ip6_rt_put(rt);

	return ret;
#else
	return false;
#endif
}
*net_dev
,
1277 const struct sockaddr
*daddr
,
1278 const struct sockaddr
*saddr
)
1280 const struct sockaddr_in
*daddr4
= (const struct sockaddr_in
*)daddr
;
1281 const struct sockaddr_in
*saddr4
= (const struct sockaddr_in
*)saddr
;
1282 const struct sockaddr_in6
*daddr6
= (const struct sockaddr_in6
*)daddr
;
1283 const struct sockaddr_in6
*saddr6
= (const struct sockaddr_in6
*)saddr
;
1285 switch (daddr
->sa_family
) {
1287 return saddr
->sa_family
== AF_INET
&&
1288 validate_ipv4_net_dev(net_dev
, daddr4
, saddr4
);
1291 return saddr
->sa_family
== AF_INET6
&&
1292 validate_ipv6_net_dev(net_dev
, daddr6
, saddr6
);
static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
					  const struct cma_req_info *req)
{
	struct sockaddr_storage listen_addr_storage, src_addr_storage;
	struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage,
			*src_addr = (struct sockaddr *)&src_addr_storage;
	struct net_device *net_dev;
	const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
	int err;

	err = cma_save_ip_info(listen_addr, src_addr, ib_event,
			       req->service_id);
	if (err)
		return ERR_PTR(err);

	net_dev = ib_get_net_dev_by_params(req->device, req->port, req->pkey,
					   gid, listen_addr);
	if (!net_dev)
		return ERR_PTR(-ENODEV);

	if (!validate_net_dev(net_dev, listen_addr, src_addr)) {
		dev_put(net_dev);
		return ERR_PTR(-EHOSTUNREACH);
	}

	return net_dev;
}
static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id)
{
	return (be64_to_cpu(service_id) >> 16) & 0xffff;
}
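
/*
 * Worked example (added for illustration): this is the inverse of
 * rdma_get_service_id() further down; a service id of 0x0000000001064853
 * yields port space 0x0106 (RDMA_PS_TCP) here and port 0x4853 in
 * cma_port_from_service_id().
 */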
static bool cma_match_private_data(struct rdma_id_private *id_priv,
				   const struct cma_hdr *hdr)
{
	struct sockaddr *addr = cma_src_addr(id_priv);
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	if (cma_any_addr(addr) && !id_priv->afonly)
		return true;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
		if (cma_get_ip_ver(hdr) != 4)
			return false;
		if (!cma_any_addr(addr) &&
		    hdr->dst_addr.ip4.addr != ip4_addr)
			return false;
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
		if (cma_get_ip_ver(hdr) != 6)
			return false;
		if (!cma_any_addr(addr) &&
		    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
			return false;
		break;
	case AF_IB:
		return true;
	default:
		return false;
	}

	return true;
}
static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num);
	enum rdma_transport_type transport =
		rdma_node_get_transport(device->node_type);

	return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB;
}

static bool cma_protocol_roce(const struct rdma_cm_id *id)
{
	struct ib_device *device = id->device;
	const int port_num = id->port_num ?: rdma_start_port(device);

	return cma_protocol_roce_dev_port(device, port_num);
}
static bool cma_match_net_dev(const struct rdma_cm_id *id,
			      const struct net_device *net_dev,
			      u8 port_num)
{
	const struct rdma_addr *addr = &id->route.addr;

	if (!net_dev)
		/* This request is an AF_IB request or a RoCE request */
		return (!id->port_num || id->port_num == port_num) &&
		       (addr->src_addr.ss_family == AF_IB ||
			cma_protocol_roce_dev_port(id->device, port_num));

	return !addr->dev_addr.bound_dev_if ||
	       (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
		addr->dev_addr.bound_dev_if == net_dev->ifindex);
}
static struct rdma_id_private *cma_find_listener(
		const struct rdma_bind_list *bind_list,
		const struct ib_cm_id	  *cm_id,
		const struct ib_cm_event  *ib_event,
		const struct cma_req_info *req,
		const struct net_device   *net_dev)
{
	struct rdma_id_private *id_priv, *id_priv_dev;

	if (!bind_list)
		return ERR_PTR(-EINVAL);

	hlist_for_each_entry(id_priv, &bind_list->owners, node) {
		if (cma_match_private_data(id_priv, ib_event->private_data)) {
			if (id_priv->id.device == cm_id->device &&
			    cma_match_net_dev(&id_priv->id, net_dev, req->port))
				return id_priv;
			list_for_each_entry(id_priv_dev,
					    &id_priv->listen_list,
					    listen_list) {
				if (id_priv_dev->id.device == cm_id->device &&
				    cma_match_net_dev(&id_priv_dev->id, net_dev, req->port))
					return id_priv_dev;
			}
		}
	}

	return ERR_PTR(-EINVAL);
}
static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
						 struct ib_cm_event *ib_event,
						 struct net_device **net_dev)
{
	struct cma_req_info req;
	struct rdma_bind_list *bind_list;
	struct rdma_id_private *id_priv;
	int err;

	err = cma_save_req_info(ib_event, &req);
	if (err)
		return ERR_PTR(err);

	*net_dev = cma_get_net_dev(ib_event, &req);
	if (IS_ERR(*net_dev)) {
		if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
			/* Assuming the protocol is AF_IB */
			*net_dev = NULL;
		} else if (cma_protocol_roce_dev_port(req.device, req.port)) {
			/* TODO find the net dev matching the request parameters
			 * through the RoCE GID table */
			*net_dev = NULL;
		} else {
			return ERR_CAST(*net_dev);
		}
	}

	bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
				rdma_ps_from_service_id(req.service_id),
				cma_port_from_service_id(req.service_id));
	id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
	if (IS_ERR(id_priv) && *net_dev) {
		dev_put(*net_dev);
		*net_dev = NULL;
	}

	return id_priv;
}
static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
{
	return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
	}
}
static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}
static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum rdma_cm_state state)
{
	switch (state) {
	case RDMA_CM_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case RDMA_CM_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case RDMA_CM_LISTEN:
		if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}
static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	struct net *net = id_priv->id.route.addr.dev_addr.net;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		cma_ps_remove(net, bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}
static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
				      id_priv->id.port_num)) {
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
		} else {
			if (mc->igmp_joined) {
				struct rdma_dev_addr *dev_addr =
					&id_priv->id.route.addr.dev_addr;
				struct net_device *ndev = NULL;

				if (dev_addr->bound_dev_if)
					ndev = dev_get_by_index(&init_net,
								dev_addr->bound_dev_if);
				if (ndev) {
					cma_igmp_send(ndev,
						      &mc->multicast.ib->rec.mgid,
						      false);
					dev_put(ndev);
				}
			}
			kref_put(&mc->mcref, release_mc);
		}
	}
}
void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum rdma_cm_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
	cma_cancel_operation(id_priv, state);

	/*
	 * Wait for any active callback to finish.  New callbacks will find
	 * the id_priv state set to destroying and abort.
	 */
	mutex_lock(&id_priv->handler_mutex);
	mutex_unlock(&id_priv->handler_mutex);

	if (id_priv->cma_dev) {
		if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.ib)
				ib_destroy_cm_id(id_priv->cm_id.ib);
		} else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.iw)
				iw_destroy_cm_id(id_priv->cm_id.iw);
		}
		cma_leave_mc_groups(id_priv);
		cma_release_dev(id_priv);
	}

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	put_net(id_priv->id.route.addr.dev_addr.net);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);
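
/*
 * Illustrative note (not part of the original file): because
 * rdma_destroy_id() drains callbacks via handler_mutex, calling it from
 * inside an event handler would self-deadlock; handlers instead return a
 * non-zero value and let the CM handler destroy the id, e.g.:
 *
 *	static int my_handler(struct rdma_cm_id *id, struct rdma_cm_event *ev)
 *	{
 *		if (ev->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
 *			return 1;	// id is destroyed for us
 *		return 0;
 *	}
 */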
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}
*event
,
1644 struct ib_cm_rep_event_param
*rep_data
,
1647 event
->param
.conn
.private_data
= private_data
;
1648 event
->param
.conn
.private_data_len
= IB_CM_REP_PRIVATE_DATA_SIZE
;
1649 event
->param
.conn
.responder_resources
= rep_data
->responder_resources
;
1650 event
->param
.conn
.initiator_depth
= rep_data
->initiator_depth
;
1651 event
->param
.conn
.flow_control
= rep_data
->flow_control
;
1652 event
->param
.conn
.rnr_retry_count
= rep_data
->rnr_retry_count
;
1653 event
->param
.conn
.srq
= rep_data
->srq
;
1654 event
->param
.conn
.qp_num
= rep_data
->remote_qpn
;
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	mutex_lock(&id_priv->handler_mutex);
	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     id_priv->state != RDMA_CM_CONNECT) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     id_priv->state != RDMA_CM_DISCONNECT))
		goto out;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		if (id_priv->id.qp) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else {
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		}
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
				   RDMA_CM_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event,
					       struct net_device *net_dev)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
	const __be64 service_id =
		      ib_event->param.req_rcvd.primary_path->service_id;
	int ret;

	id = rdma_create_id(listen_id->route.addr.dev_addr.net,
			    listen_id->event_handler, listen_id->context,
			    listen_id->ps, ib_event->param.req_rcvd.qp_type);
	if (IS_ERR(id))
		return NULL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
			      (struct sockaddr *)&id->route.addr.dst_addr,
			      listen_id, ib_event, ss_family, service_id))
		goto err;

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto err;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	if (net_dev) {
		ret = rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
		if (ret)
			goto err;
	} else {
		if (!cma_protocol_roce(listen_id) &&
		    cma_any_addr(cma_src_addr(id_priv))) {
			rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
			rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
			ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
		} else if (!cma_any_addr(cma_src_addr(id_priv))) {
			ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
			if (ret)
				goto err;
		}
	}
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;

err:
	rdma_destroy_id(id);
	return NULL;
}
static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event,
					      struct net_device *net_dev)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
	struct net *net = listen_id->route.addr.dev_addr.net;
	int ret;

	id = rdma_create_id(net, listen_id->event_handler, listen_id->context,
			    listen_id->ps, IB_QPT_UD);
	if (IS_ERR(id))
		return NULL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
			      (struct sockaddr *)&id->route.addr.dst_addr,
			      listen_id, ib_event, ss_family,
			      ib_event->param.sidr_req_rcvd.service_id))
		goto err;

	if (net_dev) {
		ret = rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
		if (ret)
			goto err;
	} else {
		if (!cma_any_addr(cma_src_addr(id_priv))) {
			ret = cma_translate_addr(cma_src_addr(id_priv),
						 &id->route.addr.dev_addr);
			if (ret)
				goto err;
		}
	}

	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}
static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}
static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
{
	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
		 (id->qp_type == IB_QPT_UD)) ||
		(!id->qp_type));
}
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id = NULL;
	struct rdma_cm_event event;
	struct net_device *net_dev;
	int offset, ret;

	listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
	if (IS_ERR(listen_id))
		return PTR_ERR(listen_id);

	if (!cma_check_req_qp_type(&listen_id->id, ib_event)) {
		ret = -EINVAL;
		goto net_dev_put;
	}

	mutex_lock(&listen_id->handler_mutex);
	if (listen_id->state != RDMA_CM_LISTEN) {
		ret = -ECONNABORTED;
		goto err1;
	}

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event, net_dev);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event, net_dev);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto err1;
	}

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	ret = cma_acquire_dev(conn_id, listen_id);
	if (ret)
		goto err2;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	/*
	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.
	 */
	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret)
		goto err3;
	/*
	 * Acquire mutex to prevent user executing rdma_destroy_id()
	 * while we're accessing the cm_id.
	 */
	mutex_lock(&lock);
	if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
	    (conn_id->id.qp_type != IB_QPT_UD))
		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
	mutex_unlock(&lock);
	mutex_unlock(&conn_id->handler_mutex);
	mutex_unlock(&listen_id->handler_mutex);
	cma_deref_id(conn_id);
	if (net_dev)
		dev_put(net_dev);
	return 0;

err3:
	cma_deref_id(conn_id);
	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;
err2:
	cma_exch(conn_id, RDMA_CM_DESTROYING);
	mutex_unlock(&conn_id->handler_mutex);
err1:
	mutex_unlock(&listen_id->handler_mutex);
	if (conn_id)
		rdma_destroy_id(&conn_id->id);

net_dev_put:
	if (net_dev)
		dev_put(net_dev);

	return ret;
}
__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
{
	if (addr->sa_family == AF_IB)
		return ((struct sockaddr_ib *) addr)->sib_sid;

	return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
}
EXPORT_SYMBOL(rdma_get_service_id);
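
/*
 * Worked example (added for illustration): for an AF_INET address bound to
 * TCP port 18515 (0x4853) in port space RDMA_PS_TCP (0x0106), this returns
 * cpu_to_be64((0x0106 << 16) + 0x4853), i.e. a SID of 0x0000000001064853 in
 * host order, matching the layout parsed by rdma_ps_from_service_id() and
 * cma_port_from_service_id() above.
 */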
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	int ret = 0;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != RDMA_CM_CONNECT)
		goto out;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		memcpy(cma_src_addr(id_priv), laddr,
		       rdma_addr_size(laddr));
		memcpy(cma_dst_addr(id_priv), raddr,
		       rdma_addr_size(raddr));
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			event.param.conn.initiator_depth = iw_event->ird;
			event.param.conn.responder_resources = iw_event->ord;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.param.conn.initiator_depth = iw_event->ird;
		event.param.conn.responder_resources = iw_event->ord;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
*cm_id
,
2030 struct iw_cm_event
*iw_event
)
2032 struct rdma_cm_id
*new_cm_id
;
2033 struct rdma_id_private
*listen_id
, *conn_id
;
2034 struct rdma_cm_event event
;
2035 int ret
= -ECONNABORTED
;
2036 struct sockaddr
*laddr
= (struct sockaddr
*)&iw_event
->local_addr
;
2037 struct sockaddr
*raddr
= (struct sockaddr
*)&iw_event
->remote_addr
;
2039 listen_id
= cm_id
->context
;
2041 mutex_lock(&listen_id
->handler_mutex
);
2042 if (listen_id
->state
!= RDMA_CM_LISTEN
)
2045 /* Create a new RDMA id for the new IW CM ID */
2046 new_cm_id
= rdma_create_id(listen_id
->id
.route
.addr
.dev_addr
.net
,
2047 listen_id
->id
.event_handler
,
2048 listen_id
->id
.context
,
2049 RDMA_PS_TCP
, IB_QPT_RC
);
2050 if (IS_ERR(new_cm_id
)) {
2054 conn_id
= container_of(new_cm_id
, struct rdma_id_private
, id
);
2055 mutex_lock_nested(&conn_id
->handler_mutex
, SINGLE_DEPTH_NESTING
);
2056 conn_id
->state
= RDMA_CM_CONNECT
;
2058 ret
= rdma_translate_ip(laddr
, &conn_id
->id
.route
.addr
.dev_addr
, NULL
);
2060 mutex_unlock(&conn_id
->handler_mutex
);
2061 rdma_destroy_id(new_cm_id
);
2065 ret
= cma_acquire_dev(conn_id
, listen_id
);
2067 mutex_unlock(&conn_id
->handler_mutex
);
2068 rdma_destroy_id(new_cm_id
);
2072 conn_id
->cm_id
.iw
= cm_id
;
2073 cm_id
->context
= conn_id
;
2074 cm_id
->cm_handler
= cma_iw_handler
;
2076 memcpy(cma_src_addr(conn_id
), laddr
, rdma_addr_size(laddr
));
2077 memcpy(cma_dst_addr(conn_id
), raddr
, rdma_addr_size(raddr
));
2079 memset(&event
, 0, sizeof event
);
2080 event
.event
= RDMA_CM_EVENT_CONNECT_REQUEST
;
2081 event
.param
.conn
.private_data
= iw_event
->private_data
;
2082 event
.param
.conn
.private_data_len
= iw_event
->private_data_len
;
2083 event
.param
.conn
.initiator_depth
= iw_event
->ird
;
2084 event
.param
.conn
.responder_resources
= iw_event
->ord
;
2087 * Protect against the user destroying conn_id from another thread
2088 * until we're done accessing it.
2090 atomic_inc(&conn_id
->refcount
);
2091 ret
= conn_id
->id
.event_handler(&conn_id
->id
, &event
);
2093 /* User wants to destroy the CM ID */
2094 conn_id
->cm_id
.iw
= NULL
;
2095 cma_exch(conn_id
, RDMA_CM_DESTROYING
);
2096 mutex_unlock(&conn_id
->handler_mutex
);
2097 cma_deref_id(conn_id
);
2098 rdma_destroy_id(&conn_id
->id
);
2102 mutex_unlock(&conn_id
->handler_mutex
);
2103 cma_deref_id(conn_id
);
2106 mutex_unlock(&listen_id
->handler_mutex
);
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct sockaddr *addr;
	struct ib_cm_id	*id;
	__be64 svc_id;

	addr = cma_src_addr(id_priv);
	svc_id = rdma_get_service_id(&id_priv->id, addr);
	id = ib_cm_insert_listen(id_priv->id.device, cma_req_handler, svc_id);
	if (IS_ERR(id))
		return PTR_ERR(id);
	id_priv->cm_id.ib = id;

	return 0;
}
static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct iw_cm_id	*id;

	id = iw_create_cm_id(id_priv->id.device,
			     iw_conn_req_handler,
			     id_priv);
	if (IS_ERR(id))
		return PTR_ERR(id);

	id->tos = id_priv->tos;
	id_priv->cm_id.iw = id;

	memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}
static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}
static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	struct net *net = id_priv->id.route.addr.dev_addr.net;
	int ret;

	if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
		return;

	id = rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps,
			    id_priv->id.qp_type);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
	memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));

	_cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;
	dev_id_priv->afonly = id_priv->afonly;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n",
			ret, cma_dev->device->name);
}
static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);
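
/*
 * Usage sketch (illustrative only): a consumer sets the type of service
 * before resolving the route, since cma_query_ib_route() below folds
 * id_priv->tos into the path record's QoS class / traffic class.
 *
 *	rdma_set_service_type(id, 0x28);	// hypothetical ToS value
 *	ret = rdma_resolve_route(id, 2000);
 */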
static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = RDMA_CM_ROUTE_QUERY;
		work->new_state = RDMA_CM_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}
static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct ib_sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;
	struct sockaddr_ib *sib;

	memset(&path_rec, 0, sizeof path_rec);
	rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
	rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	switch (cma_family(id_priv)) {
	case AF_INET:
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
		break;
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}
static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state == RDMA_CM_DESTROYING ||
	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}
static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}
int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
			   RDMA_CM_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
				     GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	id->route.num_paths = num_paths;
	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);
static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}
static int iboe_tos_to_sl(struct net_device *ndev, int tos)
{
	int prio;
	struct net_device *dev;

	prio = rt_tos2priority(tos);
	dev = ndev->priv_flags & IFF_802_1Q_VLAN ?
		vlan_dev_real_dev(ndev) : ndev;

	if (dev->num_tc)
		return netdev_get_prio_tc_map(dev, prio);

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	if (ndev->priv_flags & IFF_802_1Q_VLAN)
		return (vlan_dev_get_egress_qos_mask(ndev, prio) &
			VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#endif
	return 0;
}
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
	struct rdma_route *route = &id_priv->id.route;
	struct rdma_addr *addr = &route->addr;
	struct cma_work *work;
	int ret;
	struct net_device *ndev = NULL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);

	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	route->num_paths = 1;

	if (addr->dev_addr.bound_dev_if) {
		ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
		if (!ndev)
			return -ENODEV;

		if (ndev->flags & IFF_LOOPBACK) {
			dev_put(ndev);
			if (!id_priv->id.device->get_netdev)
				return -EOPNOTSUPP;

			ndev = id_priv->id.device->get_netdev(id_priv->id.device,
							      id_priv->id.port_num);
			if (!ndev)
				return -ENODEV;
		}

		route->path_rec->net = &init_net;
		route->path_rec->ifindex = ndev->ifindex;
		route->path_rec->gid_type = id_priv->gid_type;
	}
	if (!ndev) {
		ret = -ENODEV;
		goto err2;
	}

	memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &route->path_rec->sgid);
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
		    &route->path_rec->dgid);

	/* Use the hint from IP Stack to select GID Type */
	if (route->path_rec->gid_type < ib_network_to_gid_type(addr->dev_addr.network))
		route->path_rec->gid_type = ib_network_to_gid_type(addr->dev_addr.network);
	if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
		/* TODO: get the hoplimit from the inet/inet6 device */
		route->path_rec->hop_limit = addr->dev_addr.hoplimit;
	else
		route->path_rec->hop_limit = 1;
	route->path_rec->reversible = 1;
	route->path_rec->pkey = cpu_to_be16(0xffff);
	route->path_rec->mtu_selector = IB_SA_EQ;
	route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos);
	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
	route->path_rec->rate_selector = IB_SA_EQ;
	route->path_rec->rate = iboe_get_rate(ndev);
	dev_put(ndev);
	route->path_rec->packet_life_time_selector = IB_SA_EQ;
	route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
	if (!route->path_rec->mtu) {
		ret = -EINVAL;
		goto err2;
	}

	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	work->event.status = 0;

	queue_work(cma_wq, &work->work);

	return 0;

err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	if (rdma_cap_ib_sa(id->device, id->port_num))
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
	else if (rdma_protocol_roce(id->device, id->port_num))
		ret = cma_resolve_iboe_route(id_priv);
	else if (rdma_protocol_iwarp(id->device, id->port_num))
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
	else
		ret = -ENOSYS;

	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
static void cma_set_loopback(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		break;
	case AF_INET6:
		ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
			      0, 0, 0, htonl(1));
		break;
	default:
		ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
			    0, 0, 0, htonl(1));
		break;
	}
}
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	cma_dev = NULL;
	mutex_lock(&lock);
	list_for_each_entry(cur_dev, &dev_list, list) {
		if (cma_family(id_priv) == AF_IB &&
		    !rdma_cap_ib_cm(cur_dev->device, 1))
			continue;

		if (!cma_dev)
			cma_dev = cur_dev;

		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (!ib_query_port(cur_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE) {
				cma_dev = cur_dev;
				goto port_found;
			}
		}
	}

	if (!cma_dev) {
		ret = -ENODEV;
		goto out;
	}

	p = 1;

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid, NULL);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_protocol_ib(cma_dev->device, p)) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
	cma_set_loopback(cma_src_addr(id_priv));
out:
	mutex_unlock(&lock);
	return ret;
}
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
			   RDMA_CM_ADDR_RESOLVED))
		goto out;

	memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv, NULL);

	if (status) {
		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
				   RDMA_CM_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
}
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}
static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_resolve_ib_dev(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
		&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (!src_addr || !src_addr->sa_family) {
		src_addr = (struct sockaddr *) &id->route.addr.src_addr;
		src_addr->sa_family = dst_addr->sa_family;
		if (dst_addr->sa_family == AF_INET6) {
			struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
			struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
			src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
			if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
				id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
		} else if (dst_addr->sa_family == AF_IB) {
			((struct sockaddr_ib *) src_addr)->sib_pkey =
				((struct sockaddr_ib *) dst_addr)->sib_pkey;
		}
	}
	return rdma_bind_addr(id, src_addr);
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == RDMA_CM_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (cma_family(id_priv) != dst_addr->sa_family)
		return -EINVAL;

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
	if (cma_any_addr(dst_addr)) {
		ret = cma_resolve_loopback(id_priv);
	} else {
		if (dst_addr->sa_family == AF_IB) {
			ret = cma_resolve_ib_addr(id_priv);
		} else {
			ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv),
					      dst_addr, &id->route.addr.dev_addr,
					      timeout_ms, addr_handler, id_priv);
		}
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (reuse || id_priv->state == RDMA_CM_IDLE) {
		id_priv->reuseaddr = reuse;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_reuseaddr);
int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
		id_priv->options |= (1 << CMA_OPTION_AFONLY);
		id_priv->afonly = afonly;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_afonly);
static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr *addr;
	struct sockaddr_ib *sib;
	u64 sid, mask;
	__be16 port;

	addr = cma_src_addr(id_priv);
	port = htons(bind_list->port);

	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_port = port;
		break;
	case AF_INET6:
		((struct sockaddr_in6 *) addr)->sin6_port = port;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		sid = be64_to_cpu(sib->sib_sid);
		mask = be64_to_cpu(sib->sib_sid_mask);
		sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
		sib->sib_sid_mask = cpu_to_be64(~0ULL);
		break;
	}
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}
static int cma_alloc_port(enum rdma_port_space ps,
			  struct rdma_id_private *id_priv, unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list,
			   snum);
	if (ret < 0)
		goto err;

	bind_list->ps = ps;
	bind_list->port = (unsigned short)ret;
	cma_bind_port(bind_list, id_priv);
	return 0;
err:
	kfree(bind_list);
	return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
}
static int cma_alloc_any_port(enum rdma_port_space ps,
			      struct rdma_id_private *id_priv)
{
	static unsigned int last_used_port;
	int low, high, remaining;
	unsigned int rover;
	struct net *net = id_priv->id.route.addr.dev_addr.net;

	inet_get_local_port_range(net, &low, &high);
	remaining = (high - low) + 1;
	rover = prandom_u32() % remaining + low;
retry:
	if (last_used_port != rover &&
	    !cma_ps_find(net, ps, (unsigned short)rover)) {
		int ret = cma_alloc_port(ps, id_priv, rover);
		/*
		 * Remember previously used port number in order to avoid
		 * re-using same port immediately after it is closed.
		 */
		if (!ret)
			last_used_port = rover;
		if (ret != -EADDRNOTAVAIL)
			return ret;
	}
	if (--remaining) {
		rover++;
		if ((rover < low) || (rover > high))
			rover = low;
		goto retry;
	}
	return -EADDRNOTAVAIL;
}
/*
 * Check that the requested port is available.  This is called when trying to
 * bind to a specific port, or when trying to listen on a bound port.  In
 * the latter case, the provided id_priv may already be on the bind_list, but
 * we still need to check that it's okay to start listening.
 */
static int cma_check_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
{
	struct rdma_id_private *cur_id;
	struct sockaddr *addr, *cur_addr;

	addr = cma_src_addr(id_priv);
	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
		if (id_priv == cur_id)
			continue;

		if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
		    cur_id->reuseaddr)
			continue;

		cur_addr = cma_src_addr(cur_id);
		if (id_priv->afonly && cur_id->afonly &&
		    (addr->sa_family != cur_addr->sa_family))
			continue;

		if (cma_any_addr(addr) || cma_any_addr(cur_addr))
			return -EADDRNOTAVAIL;

		if (!cma_addr_cmp(addr, cur_addr))
			return -EADDRINUSE;
	}
	return 0;
}
static int cma_use_port(enum rdma_port_space ps,
			struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	unsigned short snum;
	int ret;

	snum = ntohs(cma_port(cma_src_addr(id_priv)));
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum);
	if (!bind_list) {
		ret = cma_alloc_port(ps, id_priv, snum);
	} else {
		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
		if (!ret)
			cma_bind_port(bind_list, id_priv);
	}
	return ret;
}
static int cma_bind_listen(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	int ret = 0;

	mutex_lock(&lock);
	if (bind_list->owners.first->next)
		ret = cma_check_port(bind_list, id_priv, 0);
	mutex_unlock(&lock);
	return ret;
}
static enum rdma_port_space cma_select_inet_ps(
		struct rdma_id_private *id_priv)
{
	switch (id_priv->id.ps) {
	case RDMA_PS_TCP:
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
	case RDMA_PS_IB:
		return id_priv->id.ps;
	default:
		return 0;
	}
}
static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv)
{
	enum rdma_port_space ps = 0;
	struct sockaddr_ib *sib;
	u64 sid_ps, mask, sid;

	sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
	mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
	sid = be64_to_cpu(sib->sib_sid) & mask;

	if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
		sid_ps = RDMA_IB_IP_PS_IB;
		ps = RDMA_PS_IB;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
		   (sid == (RDMA_IB_IP_PS_TCP & mask))) {
		sid_ps = RDMA_IB_IP_PS_TCP;
		ps = RDMA_PS_TCP;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
		   (sid == (RDMA_IB_IP_PS_UDP & mask))) {
		sid_ps = RDMA_IB_IP_PS_UDP;
		ps = RDMA_PS_UDP;
	}

	if (ps) {
		sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
		sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
						be64_to_cpu(sib->sib_sid_mask));
	}
	return ps;
}
static int cma_get_port(struct rdma_id_private *id_priv)
{
	enum rdma_port_space ps;
	int ret;

	if (cma_family(id_priv) != AF_IB)
		ps = cma_select_inet_ps(id_priv);
	else
		ps = cma_select_ib_ps(id_priv);
	if (!ps)
		return -EPROTONOSUPPORT;

	mutex_lock(&lock);
	if (cma_any_port(cma_src_addr(id_priv)))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}
static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
			       struct sockaddr *addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 *sin6;

	if (addr->sa_family != AF_INET6)
		return 0;

	sin6 = (struct sockaddr_in6 *) addr;

	if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
		return 0;

	if (!sin6->sin6_scope_id)
		return -EINVAL;

	dev_addr->bound_dev_if = sin6->sin6_scope_id;
#endif
	return 0;
}
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == RDMA_CM_IDLE) {
		id->route.addr.src_addr.ss_family = AF_INET;
		ret = rdma_bind_addr(id, cma_src_addr(id_priv));
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
		return -EINVAL;

	if (id_priv->reuseaddr) {
		ret = cma_bind_listen(id_priv);
		if (ret)
			goto err;
	}

	id_priv->backlog = backlog;
	if (id->device) {
		if (rdma_cap_ib_cm(id->device, 1)) {
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
		} else if (rdma_cap_iw_cm(id->device, 1)) {
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
		} else {
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
	    addr->sa_family != AF_IB)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
		return -EINVAL;

	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
	if (ret)
		goto err1;

	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
	if (!cma_any_addr(addr)) {
		ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		ret = cma_acquire_dev(id_priv, NULL);
		if (ret)
			goto err1;
	}

	if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
		if (addr->sa_family == AF_INET)
			id_priv->afonly = 1;
#if IS_ENABLED(CONFIG_IPV6)
		else if (addr->sa_family == AF_INET6) {
			struct net *net = id_priv->id.route.addr.dev_addr.net;

			id_priv->afonly = net->ipv6.sysctl.bindv6only;
		}
#endif
	}
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (id_priv->cma_dev)
		cma_release_dev(id_priv);
err1:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
{
	struct cma_hdr *cma_hdr;

	cma_hdr = hdr;
	cma_hdr->cma_version = CMA_VERSION;
	if (cma_family(id_priv) == AF_INET) {
		struct sockaddr_in *src4, *dst4;

		src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
		dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 4);
		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		cma_hdr->port = src4->sin_port;
	} else if (cma_family(id_priv) == AF_INET6) {
		struct sockaddr_in6 *src6, *dst6;

		src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 6);
		cma_hdr->src_addr.ip6 = src6->sin6_addr;
		cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
		cma_hdr->port = src6->sin6_port;
	}
	return 0;
}
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != RDMA_CM_CONNECT)
		goto out;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			break;
		}
		ret = cma_set_qkey(id_priv, rep->qkey);
		if (ret) {
			event.event = RDMA_CM_EVENT_ADDR_ERROR;
			event.status = ret;
			break;
		}
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct ib_cm_id *id;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
			     id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	req.path = id_priv->id.route.path_rec;
	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(private_data);
	return ret;
}
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	struct ib_cm_id *id;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	route = &id_priv->id.route;
	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.qp_num = id_priv->qp_num;
	req.qp_type = id_priv->id.qp_type;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = min_t(u8, 7, conn_param->retry_count);
	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id)) {
		ib_destroy_cm_id(id);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}
static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id->tos = id_priv->tos;
	id_priv->cm_id.iw = cm_id;

	memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));
	memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
	       rdma_addr_size(cma_dst_addr(id_priv)));

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	if (conn_param) {
		iw_param.ord = conn_param->initiator_depth;
		iw_param.ird = conn_param->responder_resources;
		iw_param.private_data = conn_param->private_data;
		iw_param.private_data_len = conn_param->private_data_len;
		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
	} else {
		memset(&iw_param, 0, sizeof iw_param);
		iw_param.qpn = id_priv->qp_num;
	}
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD)
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
	} else if (rdma_cap_iw_cm(id->device, id->port_num))
		ret = cma_connect_iw(id_priv, conn_param);
	else
		ret = -ENOSYS;
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}
static int cma_accept_iw(struct rdma_id_private *id_priv,
		  struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp) {
		iw_param.qpn = id_priv->qp_num;
	} else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}
static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status, u32 qkey,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		ret = cma_set_qkey(id_priv, qkey);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);

	id_priv->owner = task_pid_nr(current);

	if (!cma_comp(id_priv, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD) {
			if (conn_param)
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							conn_param->qkey,
							conn_param->private_data,
							conn_param->private_data_len);
			else
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							0, NULL, 0);
		} else {
			if (conn_param)
				ret = cma_accept_ib(id_priv, conn_param);
			else
				ret = cma_rep_recv(id_priv);
		}
	} else if (rdma_cap_iw_cm(id->device, id->port_num))
		ret = cma_accept_iw(id_priv, conn_param);
	else
		ret = -ENOSYS;

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD)
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
	} else
		ret = -ENOSYS;

	return ret;
}
EXPORT_SYMBOL(rdma_reject);
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
	} else
		ret = -EINVAL;

out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != RDMA_CM_ADDR_BOUND &&
	    id_priv->state != RDMA_CM_ADDR_RESOLVED)
		goto out;

	if (!status)
		status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 be16_to_cpu(multicast->rec.mlid));
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		struct rdma_dev_addr *dev_addr =
			&id_priv->id.route.addr.dev_addr;
		struct net_device *ndev =
			dev_get_by_index(&init_net, dev_addr->bound_dev_if);
		enum ib_gid_type gid_type =
			id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
			rdma_start_port(id_priv->cma_dev->device)];

		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 ndev, gid_type,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
		if (ndev)
			dev_put(ndev);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_IB) {
		memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6)) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	ret = cma_set_qkey(id_priv, 0);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	rec.qkey = cpu_to_be32(id_priv->qkey);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU |
			     IB_SA_MCMEMBER_REC_HOP_LIMIT;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	return PTR_ERR_OR_ZERO(mc->multicast.ib);
}
static void iboe_mcast_work_handler(struct work_struct *work)
{
	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
	struct cma_multicast *mc = mw->mc;
	struct ib_sa_multicast *m = mc->multicast.ib;

	mc->multicast.ib->context = mc;
	cma_ib_mc_handler(0, m);
	kref_put(&mc->mcref, release_mc);
	kfree(mw);
}
static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		mgid->raw[0] = 0xff;
		mgid->raw[1] = 0x0e;
		mgid->raw[2] = 0;
		mgid->raw[3] = 0;
		mgid->raw[4] = 0;
		mgid->raw[5] = 0;
		mgid->raw[6] = 0;
		mgid->raw[7] = 0;
		mgid->raw[8] = 0;
		mgid->raw[9] = 0;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
	}
}
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct iboe_mcast_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err = 0;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;
	enum ib_gid_type gid_type;

	if (cma_zero_addr((struct sockaddr *)&mc->addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto out1;
	}

	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);

	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
	if (id_priv->id.ps == RDMA_PS_UDP)
		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto out2;
	}
	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
	mc->multicast.ib->rec.hop_limit = 1;
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);

	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
		   rdma_start_port(id_priv->cma_dev->device)];
	if (addr->sa_family == AF_INET) {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
			err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
					    true);
			if (!err)
				mc->igmp_joined = true;
		}
	} else {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			err = -ENOTSUPP;
	}
	dev_put(ndev);
	if (err || !mc->multicast.ib->rec.mtu) {
		if (!err)
			err = -EINVAL;
		goto out2;
	}
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &mc->multicast.ib->rec.port_gid);
	work->id = id_priv;
	work->mc = mc;
	INIT_WORK(&work->work, iboe_mcast_work_handler);
	kref_get(&mc->mcref);
	queue_work(cma_wq, &work->work);

	return 0;

out2:
	kfree(mc->multicast.ib);
out1:
	kfree(work);
	return err;
}
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, rdma_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;
	mc->igmp_joined = false;
	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	if (rdma_protocol_roce(id->device, id->port_num)) {
		kref_init(&mc->mcref);
		ret = cma_iboe_join_multicast(id_priv, mc);
	} else if (rdma_cap_ib_mcast(id->device, id->port_num))
		ret = cma_join_ib_multicast(id_priv, mc);
	else
		ret = -ENOSYS;

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						be16_to_cpu(mc->multicast.ib->rec.mlid));

			BUG_ON(id_priv->cma_dev->device != id->device);

			if (rdma_cap_ib_mcast(id->device, id->port_num)) {
				ib_sa_free_multicast(mc->multicast.ib);
				kfree(mc);
			} else if (rdma_protocol_roce(id->device, id->port_num)) {
				if (mc->igmp_joined) {
					struct rdma_dev_addr *dev_addr =
						&id->route.addr.dev_addr;
					struct net_device *ndev = NULL;

					if (dev_addr->bound_dev_if)
						ndev = dev_get_by_index(&init_net,
									dev_addr->bound_dev_if);
					if (ndev) {
						cma_igmp_send(ndev,
							      &mc->multicast.ib->rec.mgid,
							      false);
						dev_put(ndev);
					}
					mc->igmp_joined = false;
				}
				kref_put(&mc->mcref, release_mc);
			}
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    (net_eq(dev_net(ndev), dev_addr->net)) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		pr_info("RDMA CM addr change for ndev %s used by id %p\n",
			ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}
static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	unsigned int i;
	unsigned long supported_gids = 0;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;
	cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
					    sizeof(*cma_dev->default_gid_type),
					    GFP_KERNEL);
	if (!cma_dev->default_gid_type) {
		kfree(cma_dev);
		return;
	}
	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		supported_gids = roce_gid_type_mask_support(device, i);
		WARN_ON(!supported_gids);
		cma_dev->default_gid_type[i - rdma_start_port(device)] =
			find_first_bit(&supported_gids, BITS_PER_LONG);
	}

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum rdma_cm_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
	if (state == RDMA_CM_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}
static void cma_remove_one(struct ib_device *device, void *client_data)
{
	struct cma_device *cma_dev = client_data;

	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev->default_gid_type);
	kfree(cma_dev);
}
static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct rdma_cm_id_stats *id_stats;
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id = NULL;
	struct cma_device *cma_dev;
	int i_dev = 0, i_id = 0;

	/*
	 * We export all of the IDs as a sequence of messages.  Each
	 * ID gets its own netlink message.
	 */
	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list) {
		if (i_dev < cb->args[0]) {
			i_dev++;
			continue;
		}

		i_id = 0;
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			if (i_id < cb->args[1]) {
				i_id++;
				continue;
			}

			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
						sizeof *id_stats, RDMA_NL_RDMA_CM,
						RDMA_NL_RDMA_CM_ID_STATS,
						NLM_F_MULTI);
			if (!id_stats)
				goto out;

			memset(id_stats, 0, sizeof *id_stats);
			id = &id_priv->id;
			id_stats->node_type = id->route.addr.dev_addr.dev_type;
			id_stats->port_num = id->port_num;
			id_stats->bound_dev_if =
				id->route.addr.dev_addr.bound_dev_if;

			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_src_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
				goto out;
			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_dst_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
				goto out;

			id_stats->pid		= id_priv->owner;
			id_stats->port_space	= id->ps;
			id_stats->cm_state	= id_priv->state;
			id_stats->qp_num	= id_priv->qp_num;
			id_stats->qp_type	= id->qp_type;

			i_id++;
		}

		cb->args[1] = 0;
		i_dev++;
	}

out:
	mutex_unlock(&lock);
	cb->args[0] = i_dev;
	cb->args[1] = i_id;

	return skb->len;
}
static const struct ibnl_client_cbs cma_cb_table[] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
				       .module = THIS_MODULE },
};
static int cma_init_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	idr_init(&pernet->tcp_ps);
	idr_init(&pernet->udp_ps);
	idr_init(&pernet->ipoib_ps);
	idr_init(&pernet->ib_ps);

	return 0;
}

static void cma_exit_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	idr_destroy(&pernet->tcp_ps);
	idr_destroy(&pernet->udp_ps);
	idr_destroy(&pernet->ipoib_ps);
	idr_destroy(&pernet->ib_ps);
}

static struct pernet_operations cma_pernet_operations = {
	.init = cma_init_net,
	.exit = cma_exit_net,
	.id = &cma_pernet_id,
	.size = sizeof(struct cma_pernet),
};
static int __init cma_init(void)
{
	int ret;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ret = register_pernet_subsys(&cma_pernet_operations);
	if (ret)
		goto err_wq;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	if (ibnl_add_client(RDMA_NL_RDMA_CM, ARRAY_SIZE(cma_cb_table),
			    cma_cb_table))
		pr_warn("RDMA CMA: failed to add netlink callback\n");
	cma_configfs_init();

	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
err_wq:
	destroy_workqueue(cma_wq);
	return ret;
}
static void __exit cma_cleanup(void)
{
	cma_configfs_exit();
	ibnl_remove_client(RDMA_NL_RDMA_CM);
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	unregister_pernet_subsys(&cma_pernet_operations);
	destroy_workqueue(cma_wq);
}
module_init(cma_init);
module_exit(cma_cleanup);