/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>

#include <net/tcp.h>
#include <net/ipv6.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18

static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
static DEFINE_IDR(ib_ps);

struct cma_device {
	struct list_head list;
	struct ib_device *device;
	struct completion comp;
	atomic_t refcount;
	struct list_head id_list;
};

struct rdma_bind_list {
	struct idr *ps;
	struct hlist_head owners;
	unsigned short port;
};

enum {
	CMA_OPTION_AFONLY,
};

/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id id;

	struct rdma_bind_list *bind_list;
	struct hlist_node node;
	struct list_head list; /* listen_any_list or cma_device.list */
	struct list_head listen_list; /* per device listens */
	struct cma_device *cma_dev;
	struct list_head mc_list;

	int internal_id;
	enum rdma_cm_state state;
	spinlock_t lock;
	struct mutex qp_mutex;

	struct completion comp;
	atomic_t refcount;
	struct mutex handler_mutex;

	int backlog;
	int timeout_ms;
	struct ib_sa_query *query;
	int query_id;
	union {
		struct ib_cm_id *ib;
		struct iw_cm_id *iw;
	} cm_id;

	u32 seq_num;
	u32 qkey;
	u32 qp_num;
	pid_t owner;
	u32 options;
	u8 srq;
	u8 tos;
	u8 reuseaddr;
	u8 afonly;
};

struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head list;
	void *context;
	struct sockaddr_storage addr;
	struct kref mcref;
};

struct cma_work {
	struct work_struct work;
	struct rdma_id_private *id;
	enum rdma_cm_state old_state;
	enum rdma_cm_state new_state;
	struct rdma_cm_event event;
};

struct cma_ndev_work {
	struct work_struct work;
	struct rdma_id_private *id;
	struct rdma_cm_event event;
};

struct iboe_mcast_work {
	struct work_struct work;
	struct rdma_id_private *id;
	struct cma_multicast *mc;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version; /* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

#define CMA_VERSION 0x00

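/*
 * State machine helpers: cma_comp() tests the current state under
 * id_priv->lock; cma_comp_exch() atomically tests for @comp and, if it
 * matches, moves the id to @exch, so callers can claim a transition in
 * one step; cma_exch() unconditionally swaps in a new state and returns
 * the old one.
 */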
static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum rdma_cm_state comp, enum rdma_cm_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
				   enum rdma_cm_state exch)
{
	unsigned long flags;
	enum rdma_cm_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}

static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static inline void release_mc(struct kref *kref)
{
	struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);

	kfree(mc->multicast.ib);
	kfree(mc);
}

static void cma_release_dev(struct rdma_id_private *id_priv)
{
	mutex_lock(&lock);
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
	mutex_unlock(&lock);
}

static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
}

static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
}

static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
	return id_priv->id.route.addr.src_addr.ss_family;
}

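/*
 * Pick the qkey for a UD-style id: a caller-supplied qkey wins unless it
 * conflicts with one already set; otherwise the UDP and IB port spaces
 * use RDMA_UDP_QKEY, and IPoIB takes the qkey from the SA's multicast
 * member record for the device's broadcast group.
 */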
static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	if (id_priv->qkey) {
		if (qkey && id_priv->qkey != qkey)
			return -EINVAL;
		return 0;
	}

	if (qkey) {
		id_priv->qkey = qkey;
		return 0;
	}

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
	case RDMA_PS_IB:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}

static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
	dev_addr->dev_type = ARPHRD_INFINIBAND;
	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
	ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
}

static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
	int ret;

	if (addr->sa_family != AF_IB) {
		ret = rdma_translate_ip(addr, dev_addr, NULL);
	} else {
		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
		ret = 0;
	}

	return ret;
}

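/*
 * Bind the id to an RDMA device and port whose cached GID table contains
 * the GID derived from the resolved source address (the IP-derived GID on
 * RoCE ports, the native GID otherwise).  The device/port the listen
 * request arrived on is tried first so that a child id lands on the same
 * device as its listener.  Takes &lock for the device list walk.
 */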
static int cma_acquire_dev(struct rdma_id_private *id_priv,
			   struct rdma_id_private *listen_id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid, iboe_gid;
	int ret = -ENODEV;
	u8 port, found_port;
	enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;

	if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	mutex_lock(&lock);
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &iboe_gid);

	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof gid);
	if (listen_id_priv &&
	    rdma_port_get_link_layer(listen_id_priv->id.device,
				     listen_id_priv->id.port_num) == dev_ll) {
		cma_dev = listen_id_priv->cma_dev;
		port = listen_id_priv->id.port_num;
		if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
		    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
			ret = ib_find_cached_gid(cma_dev->device, &iboe_gid,
						 &found_port, NULL);
		else
			ret = ib_find_cached_gid(cma_dev->device, &gid,
						 &found_port, NULL);

		if (!ret && (port == found_port)) {
			id_priv->id.port_num = found_port;
			goto out;
		}
	}
	list_for_each_entry(cma_dev, &dev_list, list) {
		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
			if (listen_id_priv &&
			    listen_id_priv->cma_dev == cma_dev &&
			    listen_id_priv->id.port_num == port)
				continue;
			if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
				if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
				    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
					ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL);
				else
					ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL);

				if (!ret && (port == found_port)) {
					id_priv->id.port_num = found_port;
					goto out;
				}
			}
		}
	}

out:
	if (!ret)
		cma_attach_to_dev(id_priv, cma_dev);

	mutex_unlock(&lock);
	return ret;
}

/*
 * Select the source IB device and address to reach the destination IB address.
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct sockaddr_ib *addr;
	union ib_gid gid, sgid, *dgid;
	u16 pkey, index;
	u8 p;
	int i;

	cma_dev = NULL;
	addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
	dgid = (union ib_gid *) &addr->sib_addr;
	pkey = ntohs(addr->sib_pkey);

	list_for_each_entry(cur_dev, &dev_list, list) {
		if (rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
			continue;

		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
				continue;

			for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i, &gid); i++) {
				if (!memcmp(&gid, dgid, sizeof(gid))) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
					goto found;
				}

				if (!cma_dev && (gid.global.subnet_prefix ==
						 dgid->global.subnet_prefix)) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
				}
			}
		}
	}

	if (!cma_dev)
		return -ENODEV;

found:
	cma_attach_to_dev(id_priv, cma_dev);
	addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
	memcpy(&addr->sib_addr, &sgid, sizeof sgid);
	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
	return 0;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static int cma_disable_callback(struct rdma_id_private *id_priv,
				enum rdma_cm_state state)
{
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != state) {
		mutex_unlock(&id_priv->handler_mutex);
		return -EINVAL;
	}
	return 0;
}

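/*
 * rdma_create_id - allocate an rdma_cm_id and its private tracking state.
 * The id starts in RDMA_CM_IDLE holding a single reference; it is torn
 * down by rdma_destroy_id() once the caller and all callbacks are done
 * with it.
 */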
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps,
				  enum ib_qp_type qp_type)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->owner = task_pid_nr(current);
	id_priv->state = RDMA_CM_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	id_priv->id.qp_type = qp_type;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);

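/*
 * UD QPs have no connection handshake, so a freshly created one can be
 * walked straight through INIT -> RTR -> RTS here; connected QPs are only
 * moved to INIT, and the remaining transitions happen as the CM exchange
 * progresses.
 */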
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

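/*
 * rdma_create_qp - create a QP on @pd, bind it to the cm_id, and take it
 * through its initial state transitions based on the id's QP type.  The
 * PD must come from the same device the id resolved to.
 */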
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (id->qp_type == IB_QPT_UD)
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);

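/*
 * The next three helpers transition the connection's QP to RTR, RTS and
 * ERR respectively, using attributes that rdma_init_qp_attr() derives
 * from the id's CM state.  On RoCE ports the RTR step also resolves the
 * source MAC matching the source GID.  All transitions are serialized by
 * qp_mutex and become no-ops if the user never bound a QP to the id.
 */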
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	union ib_gid sgid;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
			   qp_attr.ah_attr.grh.sgid_index, &sgid);
	if (ret)
		goto out;

	if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
	    == RDMA_TRANSPORT_IB &&
	    rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
	    == IB_LINK_LAYER_ETHERNET) {
		ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);

		if (ret)
			goto out;
	}
	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;
	u16 pkey;

	if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
	    IB_LINK_LAYER_INFINIBAND)
		pkey = ib_addr_get_pkey(dev_addr);
	else
		pkey = 0xffff;

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (id_priv->id.qp_type == IB_QPT_UD) {
		ret = cma_set_qkey(id_priv, 0);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);

		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);

static inline int cma_zero_addr(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
	case AF_IB:
		return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
	default:
		return 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
	case AF_IB:
		return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
	default:
		return 0;
	}
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
{
	if (src->sa_family != dst->sa_family)
		return -1;

	switch (src->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
		       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
	case AF_INET6:
		return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
				     &((struct sockaddr_in6 *) dst)->sin6_addr);
	default:
		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
				   &((struct sockaddr_ib *) dst)->sib_addr);
	}
}

static __be16 cma_port(struct sockaddr *addr)
{
	struct sockaddr_ib *sib;

	switch (addr->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) addr)->sin_port;
	case AF_INET6:
		return ((struct sockaddr_in6 *) addr)->sin6_port;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		return htons((u16) (be64_to_cpu(sib->sib_sid) &
				    be64_to_cpu(sib->sib_sid_mask)));
	default:
		return 0;
	}
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}

static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			     struct ib_sa_path_rec *path)
{
	struct sockaddr_ib *listen_ib, *ib;

	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
	ib = (struct sockaddr_ib *) &id->route.addr.src_addr;
	ib->sib_family = listen_ib->sib_family;
	ib->sib_pkey = path->pkey;
	ib->sib_flowinfo = path->flow_label;
	memcpy(&ib->sib_addr, &path->sgid, 16);
	ib->sib_sid = listen_ib->sib_sid;
	ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
	ib->sib_scope_id = listen_ib->sib_scope_id;

	ib = (struct sockaddr_ib *) &id->route.addr.dst_addr;
	ib->sib_family = listen_ib->sib_family;
	ib->sib_pkey = path->pkey;
	ib->sib_flowinfo = path->flow_label;
	memcpy(&ib->sib_addr, &path->dgid, 16);
}

static __be16 ss_get_port(const struct sockaddr_storage *ss)
{
	if (ss->ss_family == AF_INET)
		return ((struct sockaddr_in *)ss)->sin_port;
	else if (ss->ss_family == AF_INET6)
		return ((struct sockaddr_in6 *)ss)->sin6_port;
	BUG();
}

static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			      struct cma_hdr *hdr)
{
	struct sockaddr_in *ip4;

	ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
	ip4->sin_family = AF_INET;
	ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
	ip4->sin_port = ss_get_port(&listen_id->route.addr.src_addr);

	ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
	ip4->sin_family = AF_INET;
	ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
	ip4->sin_port = hdr->port;
}

static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			      struct cma_hdr *hdr)
{
	struct sockaddr_in6 *ip6;

	ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
	ip6->sin6_family = AF_INET6;
	ip6->sin6_addr = hdr->dst_addr.ip6;
	ip6->sin6_port = ss_get_port(&listen_id->route.addr.src_addr);

	ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
	ip6->sin6_family = AF_INET6;
	ip6->sin6_addr = hdr->src_addr.ip6;
	ip6->sin6_port = hdr->port;
}

static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			     struct ib_cm_event *ib_event)
{
	struct cma_hdr *hdr;

	if ((listen_id->route.addr.src_addr.ss_family == AF_IB) &&
	    (ib_event->event == IB_CM_REQ_RECEIVED)) {
		cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
		return 0;
	}

	hdr = ib_event->private_data;
	if (hdr->cma_version != CMA_VERSION)
		return -EINVAL;

	switch (cma_get_ip_ver(hdr)) {
	case 4:
		cma_save_ip4_info(id, listen_id, hdr);
		break;
	case 6:
		cma_save_ip6_info(id, listen_id, hdr);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
{
	return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum rdma_cm_state state)
{
	switch (state) {
	case RDMA_CM_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case RDMA_CM_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case RDMA_CM_LISTEN:
		if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			break;
		case IB_LINK_LAYER_ETHERNET:
			kref_put(&mc->mcref, release_mc);
			break;
		default:
			break;
		}
	}
}

void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum rdma_cm_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
	cma_cancel_operation(id_priv, state);

	/*
	 * Wait for any active callback to finish.  New callbacks will find
	 * the id_priv state set to destroying and abort.
	 */
	mutex_lock(&id_priv->handler_mutex);
	mutex_unlock(&id_priv->handler_mutex);

	if (id_priv->cma_dev) {
		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib)
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw)
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_leave_mc_groups(id_priv);
		cma_release_dev(id_priv);
	}

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);

static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}

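/*
 * Active-side IB CM callback: translate IB CM events into RDMA CM events
 * and hand them to the user's event handler.  A non-zero return from the
 * handler tears the id down here, since returning non-zero also tells
 * the IB CM to destroy the cm_id.
 */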
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		if (id_priv->id.qp) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else {
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		}
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
				   RDMA_CM_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	int ret;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps, ib_event->param.req_rcvd.qp_type);
	if (IS_ERR(id))
		return NULL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info(id, listen_id, ib_event))
		goto err;

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto err;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	if (cma_any_addr(cma_src_addr(id_priv))) {
		rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
		rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
		ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
	} else {
		ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
		if (ret)
			goto err;
	}
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;

err:
	rdma_destroy_id(id);
	return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps, IB_QPT_UD);
	if (IS_ERR(id))
		return NULL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info(id, listen_id, ib_event))
		goto err;

	if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
		ret = cma_translate_addr(cma_src_addr(id_priv), &id->route.addr.dev_addr);
		if (ret)
			goto err;
	}

	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}

static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}

static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
{
	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
		 (id->qp_type == IB_QPT_UD)) ||
		(!id->qp_type));
}

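/*
 * Passive-side connection request callback.  A new child id is created
 * for the request (UD requests via cma_new_udp_id(), connected QP types
 * via cma_new_conn_id()), bound to a matching device, and reported to
 * the listener's event handler as RDMA_CM_EVENT_CONNECT_REQUEST.
 */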
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;

	listen_id = cm_id->context;
	if (!cma_check_req_qp_type(&listen_id->id, ib_event))
		return -EINVAL;

	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
		return -ECONNABORTED;

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto err1;
	}

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	ret = cma_acquire_dev(conn_id, listen_id);
	if (ret)
		goto err2;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	/*
	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.
	 */
	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret)
		goto err3;
	/*
	 * Acquire mutex to prevent user executing rdma_destroy_id()
	 * while we're accessing the cm_id.
	 */
	mutex_lock(&lock);
	if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
	    (conn_id->id.qp_type != IB_QPT_UD))
		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
	mutex_unlock(&lock);
	mutex_unlock(&conn_id->handler_mutex);
	mutex_unlock(&listen_id->handler_mutex);
	cma_deref_id(conn_id);
	return 0;

err3:
	cma_deref_id(conn_id);
	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;
err2:
	cma_exch(conn_id, RDMA_CM_DESTROYING);
	mutex_unlock(&conn_id->handler_mutex);
err1:
	mutex_unlock(&listen_id->handler_mutex);
	if (conn_id)
		rdma_destroy_id(&conn_id->id);
	return ret;
}

__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
{
	if (addr->sa_family == AF_IB)
		return ((struct sockaddr_ib *) addr)->sib_sid;

	return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
}
EXPORT_SYMBOL(rdma_get_service_id);

static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		cma_set_ip_ver(cma_data, 4);
		cma_set_ip_ver(cma_mask, 0xF);
		if (!cma_any_addr(addr)) {
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = htonl(~0);
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		cma_set_ip_ver(cma_data, 6);
		cma_set_ip_ver(cma_mask, 0xF);
		if (!cma_any_addr(addr)) {
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
	default:
		break;
	}
}

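/*
 * iWARP CM event callback for the active side: map iw_cm events and
 * status codes onto the corresponding RDMA CM events, updating the id's
 * bound addresses from the ones the connect reply carried.
 */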
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	int ret = 0;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		memcpy(cma_src_addr(id_priv), laddr,
		       rdma_addr_size(laddr));
		memcpy(cma_dst_addr(id_priv), raddr,
		       rdma_addr_size(raddr));
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			event.param.conn.initiator_depth = iw_event->ird;
			event.param.conn.responder_resources = iw_event->ord;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.param.conn.initiator_depth = iw_event->ird;
		event.param.conn.responder_resources = iw_event->ord;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int ret;
	struct ib_device_attr attr;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
		return -ECONNABORTED;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(new_cm_id)) {
		ret = -ENOMEM;
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = RDMA_CM_CONNECT;

	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	ret = cma_acquire_dev(conn_id, listen_id);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
	memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));

	ret = ib_query_device(conn_id->id.device, &attr);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = iw_event->ird;
	event.param.conn.responder_resources = iw_event->ord;

	/*
	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.
	 */
	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, RDMA_CM_DESTROYING);
		mutex_unlock(&conn_id->handler_mutex);
		cma_deref_id(conn_id);
		rdma_destroy_id(&conn_id->id);
		goto out;
	}

	mutex_unlock(&conn_id->handler_mutex);
	cma_deref_id(conn_id);

out:
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}

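/*
 * Listen entry points.  An IB listen registers with the IB CM on the
 * id's service id; unless the id listens on a wildcard address without
 * AF-only filtering, private-data compare rules narrow the listen to the
 * bound address and IP version.  An iWARP listen is handed to the iw_cm
 * with the caller's backlog.
 */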
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	struct ib_cm_id *id;
	__be64 svc_id;
	int ret;

	id = ib_create_cm_id(id_priv->id.device, cma_req_handler, id_priv);
	if (IS_ERR(id))
		return PTR_ERR(id);

	id_priv->cm_id.ib = id;

	addr = cma_src_addr(id_priv);
	svc_id = rdma_get_service_id(&id_priv->id, addr);
	if (cma_any_addr(addr) && !id_priv->afonly)
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct iw_cm_id	*id;

	id = iw_create_cm_id(id_priv->id.device,
			     iw_conn_req_handler,
			     id_priv);
	if (IS_ERR(id))
		return PTR_ERR(id);

	id_priv->cm_id.iw = id;

	memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	if (cma_family(id_priv) == AF_IB &&
	    rdma_node_get_transport(cma_dev->device->node_type) != RDMA_TRANSPORT_IB)
		return;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
			    id_priv->id.qp_type);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
	memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;
	dev_id_priv->afonly = id_priv->afonly;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
		       "listening on device %s\n", ret, cma_dev->device->name);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);

static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = RDMA_CM_ROUTE_QUERY;
		work->new_state = RDMA_CM_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}

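/*
 * Issue the SA path record query that resolves an IB route for the id.
 * The comp_mask always covers the GIDs, pkey, path count, reversibility
 * and service id; a QoS class or traffic class is added from the id's
 * TOS / flow info depending on the address family.  Completion lands in
 * cma_query_handler(), which queues a cma_work item.
 */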
static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct ib_sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;
	struct sockaddr_ib *sib;

	memset(&path_rec, 0, sizeof path_rec);
	rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
	rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	switch (cma_family(id_priv)) {
	case AF_INET:
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
		break;
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}

c4028958 1750static void cma_work_handler(struct work_struct *_work)
e51060f0 1751{
c4028958 1752 struct cma_work *work = container_of(_work, struct cma_work, work);
e51060f0
SH
1753 struct rdma_id_private *id_priv = work->id;
1754 int destroy = 0;
1755
de910bd9 1756 mutex_lock(&id_priv->handler_mutex);
e51060f0
SH
1757 if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
1758 goto out;
1759
1760 if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
550e5ca7 1761 cma_exch(id_priv, RDMA_CM_DESTROYING);
e51060f0
SH
1762 destroy = 1;
1763 }
1764out:
de910bd9 1765 mutex_unlock(&id_priv->handler_mutex);
e51060f0
SH
1766 cma_deref_id(id_priv);
1767 if (destroy)
1768 rdma_destroy_id(&id_priv->id);
1769 kfree(work);
1770}
1771
dd5bdff8
OG
1772static void cma_ndev_work_handler(struct work_struct *_work)
1773{
1774 struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
1775 struct rdma_id_private *id_priv = work->id;
1776 int destroy = 0;
1777
1778 mutex_lock(&id_priv->handler_mutex);
550e5ca7
NM
1779 if (id_priv->state == RDMA_CM_DESTROYING ||
1780 id_priv->state == RDMA_CM_DEVICE_REMOVAL)
dd5bdff8
OG
1781 goto out;
1782
1783 if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
550e5ca7 1784 cma_exch(id_priv, RDMA_CM_DESTROYING);
dd5bdff8
OG
1785 destroy = 1;
1786 }
1787
1788out:
1789 mutex_unlock(&id_priv->handler_mutex);
1790 cma_deref_id(id_priv);
1791 if (destroy)
1792 rdma_destroy_id(&id_priv->id);
1793 kfree(work);
1794}
1795
e51060f0
SH
1796static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
1797{
1798 struct rdma_route *route = &id_priv->id.route;
1799 struct cma_work *work;
1800 int ret;
1801
1802 work = kzalloc(sizeof *work, GFP_KERNEL);
1803 if (!work)
1804 return -ENOMEM;
1805
1806 work->id = id_priv;
c4028958 1807 INIT_WORK(&work->work, cma_work_handler);
550e5ca7
NM
1808 work->old_state = RDMA_CM_ROUTE_QUERY;
1809 work->new_state = RDMA_CM_ROUTE_RESOLVED;
e51060f0
SH
1810 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1811
1812 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
1813 if (!route->path_rec) {
1814 ret = -ENOMEM;
1815 goto err1;
1816 }
1817
1818 ret = cma_query_ib_route(id_priv, timeout_ms, work);
1819 if (ret)
1820 goto err2;
1821
1822 return 0;
1823err2:
1824 kfree(route->path_rec);
1825 route->path_rec = NULL;
1826err1:
1827 kfree(work);
1828 return ret;
1829}
1830
1831int rdma_set_ib_paths(struct rdma_cm_id *id,
1832 struct ib_sa_path_rec *path_rec, int num_paths)
1833{
1834 struct rdma_id_private *id_priv;
1835 int ret;
1836
1837 id_priv = container_of(id, struct rdma_id_private, id);
550e5ca7
NM
1838 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
1839 RDMA_CM_ROUTE_RESOLVED))
e51060f0
SH
1840 return -EINVAL;
1841
9893e742
JL
1842 id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
1843 GFP_KERNEL);
e51060f0
SH
1844 if (!id->route.path_rec) {
1845 ret = -ENOMEM;
1846 goto err;
1847 }
1848
ae2d9293 1849 id->route.num_paths = num_paths;
e51060f0
SH
1850 return 0;
1851err:
550e5ca7 1852 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
e51060f0
SH
1853 return ret;
1854}
1855EXPORT_SYMBOL(rdma_set_ib_paths);
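/*
 * A short sketch of supplying a pre-resolved path instead of querying the
 * SA, assuming the caller cached an ib_sa_path_rec from an earlier lookup
 * (cached_rec and conn_param are hypothetical):
 */
#if 0
	ret = rdma_set_ib_paths(id, &cached_rec, 1);
	if (!ret)
		ret = rdma_connect(id, &conn_param);	/* id is now ROUTE_RESOLVED */
#endif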
1856
07ebafba
TT
1857static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
1858{
1859 struct cma_work *work;
1860
1861 work = kzalloc(sizeof *work, GFP_KERNEL);
1862 if (!work)
1863 return -ENOMEM;
1864
1865 work->id = id_priv;
c4028958 1866 INIT_WORK(&work->work, cma_work_handler);
550e5ca7
NM
1867 work->old_state = RDMA_CM_ROUTE_QUERY;
1868 work->new_state = RDMA_CM_ROUTE_RESOLVED;
07ebafba
TT
1869 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1870 queue_work(cma_wq, &work->work);
1871 return 0;
1872}
1873
eb072c4b
EP
1874static int iboe_tos_to_sl(struct net_device *ndev, int tos)
1875{
1876 int prio;
1877 struct net_device *dev;
1878
1879 prio = rt_tos2priority(tos);
1880 dev = ndev->priv_flags & IFF_802_1Q_VLAN ?
1881 vlan_dev_real_dev(ndev) : ndev;
1882
1883 if (dev->num_tc)
1884 return netdev_get_prio_tc_map(dev, prio);
1885
1886#if IS_ENABLED(CONFIG_VLAN_8021Q)
1887 if (ndev->priv_flags & IFF_802_1Q_VLAN)
1888 return (vlan_dev_get_egress_qos_mask(ndev, prio) &
1889 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1890#endif
1891 return 0;
1892}
1893
3c86aa70
EC
1894static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
1895{
1896 struct rdma_route *route = &id_priv->id.route;
1897 struct rdma_addr *addr = &route->addr;
1898 struct cma_work *work;
1899 int ret;
3c86aa70 1900 struct net_device *ndev = NULL;
dd5f03be 1901
3c86aa70 1902
3c86aa70
EC
1903 work = kzalloc(sizeof *work, GFP_KERNEL);
1904 if (!work)
1905 return -ENOMEM;
1906
1907 work->id = id_priv;
1908 INIT_WORK(&work->work, cma_work_handler);
1909
1910 route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
1911 if (!route->path_rec) {
1912 ret = -ENOMEM;
1913 goto err1;
1914 }
1915
1916 route->num_paths = 1;
1917
3c86aa70
EC
1918 if (addr->dev_addr.bound_dev_if)
1919 ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
1920 if (!ndev) {
1921 ret = -ENODEV;
1922 goto err2;
1923 }
1924
dd5f03be
MB
1925 route->path_rec->vlan_id = rdma_vlan_dev_vlan_id(ndev);
1926 memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);
1927 memcpy(route->path_rec->smac, ndev->dev_addr, ndev->addr_len);
af7bd463 1928
7b85627b
MS
1929 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
1930 &route->path_rec->sgid);
1931 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
1932 &route->path_rec->dgid);
af7bd463
EC
1933
1934 route->path_rec->hop_limit = 1;
1935 route->path_rec->reversible = 1;
1936 route->path_rec->pkey = cpu_to_be16(0xffff);
1937 route->path_rec->mtu_selector = IB_SA_EQ;
eb072c4b 1938 route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos);
3c86aa70
EC
1939 route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
1940 route->path_rec->rate_selector = IB_SA_EQ;
1941 route->path_rec->rate = iboe_get_rate(ndev);
1942 dev_put(ndev);
1943 route->path_rec->packet_life_time_selector = IB_SA_EQ;
1944 route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
1945 if (!route->path_rec->mtu) {
1946 ret = -EINVAL;
1947 goto err2;
1948 }
1949
550e5ca7
NM
1950 work->old_state = RDMA_CM_ROUTE_QUERY;
1951 work->new_state = RDMA_CM_ROUTE_RESOLVED;
3c86aa70
EC
1952 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1953 work->event.status = 0;
1954
1955 queue_work(cma_wq, &work->work);
1956
1957 return 0;
1958
1959err2:
1960 kfree(route->path_rec);
1961 route->path_rec = NULL;
1962err1:
1963 kfree(work);
1964 return ret;
1965}
1966
e51060f0
SH
1967int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
1968{
1969 struct rdma_id_private *id_priv;
1970 int ret;
1971
1972 id_priv = container_of(id, struct rdma_id_private, id);
550e5ca7 1973 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
e51060f0
SH
1974 return -EINVAL;
1975
1976 atomic_inc(&id_priv->refcount);
07ebafba
TT
1977 switch (rdma_node_get_transport(id->device->node_type)) {
1978 case RDMA_TRANSPORT_IB:
3c86aa70
EC
1979 switch (rdma_port_get_link_layer(id->device, id->port_num)) {
1980 case IB_LINK_LAYER_INFINIBAND:
1981 ret = cma_resolve_ib_route(id_priv, timeout_ms);
1982 break;
1983 case IB_LINK_LAYER_ETHERNET:
1984 ret = cma_resolve_iboe_route(id_priv);
1985 break;
1986 default:
1987 ret = -ENOSYS;
1988 }
e51060f0 1989 break;
07ebafba
TT
1990 case RDMA_TRANSPORT_IWARP:
1991 ret = cma_resolve_iw_route(id_priv, timeout_ms);
1992 break;
e51060f0
SH
1993 default:
1994 ret = -ENOSYS;
1995 break;
1996 }
1997 if (ret)
1998 goto err;
1999
2000 return 0;
2001err:
550e5ca7 2002 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
e51060f0
SH
2003 cma_deref_id(id_priv);
2004 return ret;
2005}
2006EXPORT_SYMBOL(rdma_resolve_route);
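/*
 * A minimal active-side sketch: route resolution is asynchronous, so the
 * call is typically chained from the event handler once the address has
 * resolved.  my_cm_handler is a hypothetical consumer callback.
 */
#if 0
static int my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		return rdma_resolve_route(id, 2000);
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		/* allocate a QP here, then call rdma_connect() */
		return 0;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
		/* returning non-zero tells the CMA to destroy the id */
		return -ECONNABORTED;
	default:
		return 0;
	}
}
#endif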
2007
6a3e362d
SH
2008static void cma_set_loopback(struct sockaddr *addr)
2009{
2010 switch (addr->sa_family) {
2011 case AF_INET:
2012 ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
2013 break;
2014 case AF_INET6:
2015 ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
2016 0, 0, 0, htonl(1));
2017 break;
2018 default:
2019 ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
2020 0, 0, 0, htonl(1));
2021 break;
2022 }
2023}
2024
e51060f0
SH
2025static int cma_bind_loopback(struct rdma_id_private *id_priv)
2026{
b0569e40 2027 struct cma_device *cma_dev, *cur_dev;
e51060f0 2028 struct ib_port_attr port_attr;
f0ee3404 2029 union ib_gid gid;
e51060f0
SH
2030 u16 pkey;
2031 int ret;
2032 u8 p;
2033
b0569e40 2034 cma_dev = NULL;
e51060f0 2035 mutex_lock(&lock);
b0569e40
SH
2036 list_for_each_entry(cur_dev, &dev_list, list) {
2037 if (cma_family(id_priv) == AF_IB &&
2038 rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
2039 continue;
2040
2041 if (!cma_dev)
2042 cma_dev = cur_dev;
2043
2044 for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
2045 if (!ib_query_port(cur_dev->device, p, &port_attr) &&
2046 port_attr.state == IB_PORT_ACTIVE) {
2047 cma_dev = cur_dev;
2048 goto port_found;
2049 }
2050 }
2051 }
2052
2053 if (!cma_dev) {
e82153b5
KK
2054 ret = -ENODEV;
2055 goto out;
2056 }
e51060f0 2057
e82153b5 2058 p = 1;
e51060f0
SH
2059
2060port_found:
f0ee3404 2061 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
e51060f0
SH
2062 if (ret)
2063 goto out;
2064
2065 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
2066 if (ret)
2067 goto out;
2068
6f8372b6 2069 id_priv->id.route.addr.dev_addr.dev_type =
3c86aa70 2070 (rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
6f8372b6
SH
2071 ARPHRD_INFINIBAND : ARPHRD_ETHER;
2072
2073 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
e51060f0
SH
2074 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
2075 id_priv->id.port_num = p;
2076 cma_attach_to_dev(id_priv, cma_dev);
f4753834 2077 cma_set_loopback(cma_src_addr(id_priv));
e51060f0
SH
2078out:
2079 mutex_unlock(&lock);
2080 return ret;
2081}
2082
2083static void addr_handler(int status, struct sockaddr *src_addr,
2084 struct rdma_dev_addr *dev_addr, void *context)
2085{
2086 struct rdma_id_private *id_priv = context;
a1b1b61f 2087 struct rdma_cm_event event;
e51060f0 2088
a1b1b61f 2089 memset(&event, 0, sizeof event);
de910bd9 2090 mutex_lock(&id_priv->handler_mutex);
550e5ca7
NM
2091 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
2092 RDMA_CM_ADDR_RESOLVED))
61a73c70 2093 goto out;
61a73c70 2094
7b85627b 2095 memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
61a73c70 2096 if (!status && !id_priv->cma_dev)
be9130cc 2097 status = cma_acquire_dev(id_priv, NULL);
e51060f0
SH
2098
2099 if (status) {
550e5ca7
NM
2100 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
2101 RDMA_CM_ADDR_BOUND))
e51060f0 2102 goto out;
a1b1b61f
SH
2103 event.event = RDMA_CM_EVENT_ADDR_ERROR;
2104 event.status = status;
7b85627b 2105 } else
a1b1b61f 2106 event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
e51060f0 2107
a1b1b61f 2108 if (id_priv->id.event_handler(&id_priv->id, &event)) {
550e5ca7 2109 cma_exch(id_priv, RDMA_CM_DESTROYING);
de910bd9 2110 mutex_unlock(&id_priv->handler_mutex);
e51060f0
SH
2111 cma_deref_id(id_priv);
2112 rdma_destroy_id(&id_priv->id);
2113 return;
2114 }
2115out:
de910bd9 2116 mutex_unlock(&id_priv->handler_mutex);
e51060f0
SH
2117 cma_deref_id(id_priv);
2118}
2119
2120static int cma_resolve_loopback(struct rdma_id_private *id_priv)
2121{
2122 struct cma_work *work;
f0ee3404 2123 union ib_gid gid;
e51060f0
SH
2124 int ret;
2125
2126 work = kzalloc(sizeof *work, GFP_KERNEL);
2127 if (!work)
2128 return -ENOMEM;
2129
2130 if (!id_priv->cma_dev) {
2131 ret = cma_bind_loopback(id_priv);
2132 if (ret)
2133 goto err;
2134 }
2135
6f8372b6
SH
2136 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
2137 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
e51060f0 2138
e51060f0 2139 work->id = id_priv;
c4028958 2140 INIT_WORK(&work->work, cma_work_handler);
550e5ca7
NM
2141 work->old_state = RDMA_CM_ADDR_QUERY;
2142 work->new_state = RDMA_CM_ADDR_RESOLVED;
e51060f0
SH
2143 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
2144 queue_work(cma_wq, &work->work);
2145 return 0;
2146err:
2147 kfree(work);
2148 return ret;
2149}
2150
f17df3b0
SH
2151static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
2152{
2153 struct cma_work *work;
2154 int ret;
2155
2156 work = kzalloc(sizeof *work, GFP_KERNEL);
2157 if (!work)
2158 return -ENOMEM;
2159
2160 if (!id_priv->cma_dev) {
2161 ret = cma_resolve_ib_dev(id_priv);
2162 if (ret)
2163 goto err;
2164 }
2165
2166 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
2167 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
2168
2169 work->id = id_priv;
2170 INIT_WORK(&work->work, cma_work_handler);
2171 work->old_state = RDMA_CM_ADDR_QUERY;
2172 work->new_state = RDMA_CM_ADDR_RESOLVED;
2173 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
2174 queue_work(cma_wq, &work->work);
2175 return 0;
2176err:
2177 kfree(work);
2178 return ret;
2179}
2180
e51060f0
SH
2181static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2182 struct sockaddr *dst_addr)
2183{
d14714df
SH
2184 if (!src_addr || !src_addr->sa_family) {
2185 src_addr = (struct sockaddr *) &id->route.addr.src_addr;
f17df3b0
SH
2186 src_addr->sa_family = dst_addr->sa_family;
2187 if (dst_addr->sa_family == AF_INET6) {
d14714df
SH
2188 ((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
2189 ((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
f17df3b0
SH
2190 } else if (dst_addr->sa_family == AF_IB) {
2191 ((struct sockaddr_ib *) src_addr)->sib_pkey =
2192 ((struct sockaddr_ib *) dst_addr)->sib_pkey;
d14714df
SH
2193 }
2194 }
2195 return rdma_bind_addr(id, src_addr);
e51060f0
SH
2196}
2197
2198int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2199 struct sockaddr *dst_addr, int timeout_ms)
2200{
2201 struct rdma_id_private *id_priv;
2202 int ret;
2203
2204 id_priv = container_of(id, struct rdma_id_private, id);
550e5ca7 2205 if (id_priv->state == RDMA_CM_IDLE) {
e51060f0
SH
2206 ret = cma_bind_addr(id, src_addr, dst_addr);
2207 if (ret)
2208 return ret;
2209 }
2210
4ae7152e
SH
2211 if (cma_family(id_priv) != dst_addr->sa_family)
2212 return -EINVAL;
2213
550e5ca7 2214 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
e51060f0
SH
2215 return -EINVAL;
2216
2217 atomic_inc(&id_priv->refcount);
f4753834 2218 memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
f17df3b0 2219 if (cma_any_addr(dst_addr)) {
e51060f0 2220 ret = cma_resolve_loopback(id_priv);
f17df3b0
SH
2221 } else {
2222 if (dst_addr->sa_family == AF_IB) {
2223 ret = cma_resolve_ib_addr(id_priv);
2224 } else {
2225 ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv),
2226 dst_addr, &id->route.addr.dev_addr,
2227 timeout_ms, addr_handler, id_priv);
2228 }
2229 }
e51060f0
SH
2230 if (ret)
2231 goto err;
2232
2233 return 0;
2234err:
550e5ca7 2235 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
e51060f0
SH
2236 cma_deref_id(id_priv);
2237 return ret;
2238}
2239EXPORT_SYMBOL(rdma_resolve_addr);
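/*
 * A minimal sketch of kicking off resolution from process context, with
 * my_cm_handler as sketched above and my_ctx, the address, and the port
 * all illustrative.  Completion arrives as RDMA_CM_EVENT_ADDR_RESOLVED or
 * RDMA_CM_EVENT_ADDR_ERROR on the handler.
 */
#if 0
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(7471),
		.sin_addr.s_addr = in_aton("192.168.1.2"),
	};
	struct rdma_cm_id *id;

	id = rdma_create_id(my_cm_handler, my_ctx, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);
	ret = rdma_resolve_addr(id, NULL, (struct sockaddr *)&dst, 2000);
#endif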
2240
a9bb7912
HS
2241int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
2242{
2243 struct rdma_id_private *id_priv;
2244 unsigned long flags;
2245 int ret;
2246
2247 id_priv = container_of(id, struct rdma_id_private, id);
2248 spin_lock_irqsave(&id_priv->lock, flags);
c8dea2f9 2249 if (reuse || id_priv->state == RDMA_CM_IDLE) {
a9bb7912
HS
2250 id_priv->reuseaddr = reuse;
2251 ret = 0;
2252 } else {
2253 ret = -EINVAL;
2254 }
2255 spin_unlock_irqrestore(&id_priv->lock, flags);
2256 return ret;
2257}
2258EXPORT_SYMBOL(rdma_set_reuseaddr);
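/*
 * A short sketch: reuseaddr mirrors SO_REUSEADDR and must be set while
 * the id is still idle, before binding (src is hypothetical):
 */
#if 0
	ret = rdma_set_reuseaddr(id, 1);
	if (!ret)
		ret = rdma_bind_addr(id, (struct sockaddr *)&src);
#endif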
2259
68602120
SH
2260int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
2261{
2262 struct rdma_id_private *id_priv;
2263 unsigned long flags;
2264 int ret;
2265
2266 id_priv = container_of(id, struct rdma_id_private, id);
2267 spin_lock_irqsave(&id_priv->lock, flags);
2268 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
2269 id_priv->options |= (1 << CMA_OPTION_AFONLY);
2270 id_priv->afonly = afonly;
2271 ret = 0;
2272 } else {
2273 ret = -EINVAL;
2274 }
2275 spin_unlock_irqrestore(&id_priv->lock, flags);
2276 return ret;
2277}
2278EXPORT_SYMBOL(rdma_set_afonly);
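/*
 * A short sketch: like IPV6_V6ONLY, this restricts the id to its own
 * address family, and is only accepted while the id is idle or
 * address-bound:
 */
#if 0
	ret = rdma_set_afonly(id, 1);
	if (ret)
		pr_warn("failed to set af_only: %d\n", ret);
#endif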
2279
e51060f0
SH
2280static void cma_bind_port(struct rdma_bind_list *bind_list,
2281 struct rdma_id_private *id_priv)
2282{
58afdcb7
SH
2283 struct sockaddr *addr;
2284 struct sockaddr_ib *sib;
2285 u64 sid, mask;
2286 __be16 port;
e51060f0 2287
f4753834 2288 addr = cma_src_addr(id_priv);
58afdcb7
SH
2289 port = htons(bind_list->port);
2290
2291 switch (addr->sa_family) {
2292 case AF_INET:
2293 ((struct sockaddr_in *) addr)->sin_port = port;
2294 break;
2295 case AF_INET6:
2296 ((struct sockaddr_in6 *) addr)->sin6_port = port;
2297 break;
2298 case AF_IB:
2299 sib = (struct sockaddr_ib *) addr;
2300 sid = be64_to_cpu(sib->sib_sid);
2301 mask = be64_to_cpu(sib->sib_sid_mask);
2302 sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
2303 sib->sib_sid_mask = cpu_to_be64(~0ULL);
2304 break;
2305 }
e51060f0
SH
2306 id_priv->bind_list = bind_list;
2307 hlist_add_head(&id_priv->node, &bind_list->owners);
2308}
2309
2310static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
2311 unsigned short snum)
2312{
2313 struct rdma_bind_list *bind_list;
3b069c5d 2314 int ret;
e51060f0 2315
cb164b8c 2316 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
e51060f0
SH
2317 if (!bind_list)
2318 return -ENOMEM;
2319
3b069c5d
TH
2320 ret = idr_alloc(ps, bind_list, snum, snum + 1, GFP_KERNEL);
2321 if (ret < 0)
2322 goto err;
aedec080
SH
2323
2324 bind_list->ps = ps;
3b069c5d 2325 bind_list->port = (unsigned short)ret;
aedec080
SH
2326 cma_bind_port(bind_list, id_priv);
2327 return 0;
3b069c5d 2328err:
aedec080 2329 kfree(bind_list);
3b069c5d 2330 return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
aedec080 2331}
e51060f0 2332
aedec080
SH
2333static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
2334{
5d7220e8
TH
2335 static unsigned int last_used_port;
2336 int low, high, remaining;
2337 unsigned int rover;
e51060f0 2338
0bbf87d8 2339 inet_get_local_port_range(&init_net, &low, &high);
5d7220e8 2340 remaining = (high - low) + 1;
63862b5b 2341 rover = prandom_u32() % remaining + low;
5d7220e8
TH
2342retry:
2343 if (last_used_port != rover &&
2344 !idr_find(ps, (unsigned short) rover)) {
2345 int ret = cma_alloc_port(ps, id_priv, rover);
2346 /*
2347 * Remember previously used port number in order to avoid
2348 * re-using same port immediately after it is closed.
2349 */
2350 if (!ret)
2351 last_used_port = rover;
2352 if (ret != -EADDRNOTAVAIL)
2353 return ret;
e51060f0 2354 }
5d7220e8
TH
2355 if (--remaining) {
2356 rover++;
2357 if ((rover < low) || (rover > high))
2358 rover = low;
2359 goto retry;
2360 }
2361 return -EADDRNOTAVAIL;
e51060f0
SH
2362}
2363
a9bb7912
HS
2364/*
2365 * Check that the requested port is available. This is called when trying to
2366 * bind to a specific port, or when trying to listen on a bound port. In
2367 * the latter case, the provided id_priv may already be on the bind_list, but
2368 * we still need to check that it's okay to start listening.
2369 */
2370static int cma_check_port(struct rdma_bind_list *bind_list,
2371 struct rdma_id_private *id_priv, uint8_t reuseaddr)
e51060f0
SH
2372{
2373 struct rdma_id_private *cur_id;
43b752da 2374 struct sockaddr *addr, *cur_addr;
e51060f0 2375
f4753834 2376 addr = cma_src_addr(id_priv);
b67bfe0d 2377 hlist_for_each_entry(cur_id, &bind_list->owners, node) {
a9bb7912
HS
2378 if (id_priv == cur_id)
2379 continue;
3cd96564 2380
5b0ec991
SH
2381 if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
2382 cur_id->reuseaddr)
2383 continue;
e51060f0 2384
f4753834 2385 cur_addr = cma_src_addr(cur_id);
5b0ec991
SH
2386 if (id_priv->afonly && cur_id->afonly &&
2387 (addr->sa_family != cur_addr->sa_family))
2388 continue;
2389
2390 if (cma_any_addr(addr) || cma_any_addr(cur_addr))
2391 return -EADDRNOTAVAIL;
2392
2393 if (!cma_addr_cmp(addr, cur_addr))
2394 return -EADDRINUSE;
a9bb7912 2395 }
e51060f0
SH
2396 return 0;
2397}
2398
a9bb7912
HS
2399static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
2400{
2401 struct rdma_bind_list *bind_list;
2402 unsigned short snum;
2403 int ret;
2404
f4753834 2405 snum = ntohs(cma_port(cma_src_addr(id_priv)));
a9bb7912
HS
2406 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
2407 return -EACCES;
2408
2409 bind_list = idr_find(ps, snum);
2410 if (!bind_list) {
2411 ret = cma_alloc_port(ps, id_priv, snum);
2412 } else {
2413 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
2414 if (!ret)
2415 cma_bind_port(bind_list, id_priv);
2416 }
2417 return ret;
2418}
2419
2420static int cma_bind_listen(struct rdma_id_private *id_priv)
2421{
2422 struct rdma_bind_list *bind_list = id_priv->bind_list;
2423 int ret = 0;
2424
2425 mutex_lock(&lock);
2426 if (bind_list->owners.first->next)
2427 ret = cma_check_port(bind_list, id_priv, 0);
2428 mutex_unlock(&lock);
2429 return ret;
2430}
2431
58afdcb7 2432static struct idr *cma_select_inet_ps(struct rdma_id_private *id_priv)
e51060f0 2433{
e51060f0 2434 switch (id_priv->id.ps) {
e51060f0 2435 case RDMA_PS_TCP:
58afdcb7 2436 return &tcp_ps;
628e5f6d 2437 case RDMA_PS_UDP:
58afdcb7 2438 return &udp_ps;
c8f6a362 2439 case RDMA_PS_IPOIB:
58afdcb7 2440 return &ipoib_ps;
2d2e9415 2441 case RDMA_PS_IB:
58afdcb7 2442 return &ib_ps;
e51060f0 2443 default:
58afdcb7
SH
2444 return NULL;
2445 }
2446}
2447
2448static struct idr *cma_select_ib_ps(struct rdma_id_private *id_priv)
2449{
2450 struct idr *ps = NULL;
2451 struct sockaddr_ib *sib;
2452 u64 sid_ps, mask, sid;
2453
f4753834 2454 sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
58afdcb7
SH
2455 mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
2456 sid = be64_to_cpu(sib->sib_sid) & mask;
2457
2458 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
2459 sid_ps = RDMA_IB_IP_PS_IB;
2460 ps = &ib_ps;
2461 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
2462 (sid == (RDMA_IB_IP_PS_TCP & mask))) {
2463 sid_ps = RDMA_IB_IP_PS_TCP;
2464 ps = &tcp_ps;
2465 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
2466 (sid == (RDMA_IB_IP_PS_UDP & mask))) {
2467 sid_ps = RDMA_IB_IP_PS_UDP;
2468 ps = &udp_ps;
e51060f0
SH
2469 }
2470
58afdcb7
SH
2471 if (ps) {
2472 sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
2473 sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
2474 be64_to_cpu(sib->sib_sid_mask));
2475 }
2476 return ps;
2477}
2478
2479static int cma_get_port(struct rdma_id_private *id_priv)
2480{
2481 struct idr *ps;
2482 int ret;
2483
f4753834 2484 if (cma_family(id_priv) != AF_IB)
58afdcb7
SH
2485 ps = cma_select_inet_ps(id_priv);
2486 else
2487 ps = cma_select_ib_ps(id_priv);
2488 if (!ps)
2489 return -EPROTONOSUPPORT;
2490
e51060f0 2491 mutex_lock(&lock);
f4753834 2492 if (cma_any_port(cma_src_addr(id_priv)))
aedec080 2493 ret = cma_alloc_any_port(ps, id_priv);
e51060f0
SH
2494 else
2495 ret = cma_use_port(ps, id_priv);
2496 mutex_unlock(&lock);
2497
2498 return ret;
2499}
2500
d14714df
SH
2501static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
2502 struct sockaddr *addr)
2503{
d90f9b35 2504#if IS_ENABLED(CONFIG_IPV6)
d14714df
SH
2505 struct sockaddr_in6 *sin6;
2506
2507 if (addr->sa_family != AF_INET6)
2508 return 0;
2509
2510 sin6 = (struct sockaddr_in6 *) addr;
5462eddd
SK
2511
2512 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
2513 return 0;
2514
2515 if (!sin6->sin6_scope_id)
d14714df
SH
2516 return -EINVAL;
2517
2518 dev_addr->bound_dev_if = sin6->sin6_scope_id;
2519#endif
2520 return 0;
2521}
2522
a9bb7912
HS
2523int rdma_listen(struct rdma_cm_id *id, int backlog)
2524{
2525 struct rdma_id_private *id_priv;
2526 int ret;
2527
2528 id_priv = container_of(id, struct rdma_id_private, id);
550e5ca7 2529 if (id_priv->state == RDMA_CM_IDLE) {
f4753834
SH
2530 id->route.addr.src_addr.ss_family = AF_INET;
2531 ret = rdma_bind_addr(id, cma_src_addr(id_priv));
a9bb7912
HS
2532 if (ret)
2533 return ret;
2534 }
2535
550e5ca7 2536 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
a9bb7912
HS
2537 return -EINVAL;
2538
2539 if (id_priv->reuseaddr) {
2540 ret = cma_bind_listen(id_priv);
2541 if (ret)
2542 goto err;
2543 }
2544
2545 id_priv->backlog = backlog;
2546 if (id->device) {
2547 switch (rdma_node_get_transport(id->device->node_type)) {
2548 case RDMA_TRANSPORT_IB:
2549 ret = cma_ib_listen(id_priv);
2550 if (ret)
2551 goto err;
2552 break;
2553 case RDMA_TRANSPORT_IWARP:
2554 ret = cma_iw_listen(id_priv, backlog);
2555 if (ret)
2556 goto err;
2557 break;
2558 default:
2559 ret = -ENOSYS;
2560 goto err;
2561 }
2562 } else
2563 cma_listen_on_all(id_priv);
2564
2565 return 0;
2566err:
2567 id_priv->backlog = 0;
550e5ca7 2568 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
a9bb7912
HS
2569 return ret;
2570}
2571EXPORT_SYMBOL(rdma_listen);
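/*
 * A minimal passive-side sketch, assuming a hypothetical handler
 * my_listen_handler that fields RDMA_CM_EVENT_CONNECT_REQUEST events
 * (see the rdma_accept() sketch further down).  Binding a wildcard
 * address with no device attached listens across all RDMA devices:
 */
#if 0
	struct sockaddr_in src = {
		.sin_family = AF_INET,
		.sin_port = htons(7471),
	};

	id = rdma_create_id(my_listen_handler, my_ctx, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);
	ret = rdma_bind_addr(id, (struct sockaddr *)&src);
	if (!ret)
		ret = rdma_listen(id, 16);	/* backlog of 16 */
#endif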
2572
e51060f0
SH
2573int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
2574{
2575 struct rdma_id_private *id_priv;
2576 int ret;
2577
680f920a
SH
2578 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
2579 addr->sa_family != AF_IB)
e51060f0
SH
2580 return -EAFNOSUPPORT;
2581
2582 id_priv = container_of(id, struct rdma_id_private, id);
550e5ca7 2583 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
e51060f0
SH
2584 return -EINVAL;
2585
d14714df
SH
2586 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
2587 if (ret)
2588 goto err1;
2589
7b85627b 2590 memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
8523c048 2591 if (!cma_any_addr(addr)) {
680f920a 2592 ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
e51060f0 2593 if (ret)
255d0c14
KK
2594 goto err1;
2595
be9130cc 2596 ret = cma_acquire_dev(id_priv, NULL);
255d0c14
KK
2597 if (ret)
2598 goto err1;
e51060f0
SH
2599 }
2600
68602120
SH
2601 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
2602 if (addr->sa_family == AF_INET)
2603 id_priv->afonly = 1;
5b0ec991 2604#if IS_ENABLED(CONFIG_IPV6)
68602120
SH
2605 else if (addr->sa_family == AF_INET6)
2606 id_priv->afonly = init_net.ipv6.sysctl.bindv6only;
5b0ec991 2607#endif
68602120 2608 }
e51060f0
SH
2609 ret = cma_get_port(id_priv);
2610 if (ret)
255d0c14 2611 goto err2;
e51060f0
SH
2612
2613 return 0;
255d0c14 2614err2:
a396d43a
SH
2615 if (id_priv->cma_dev)
2616 cma_release_dev(id_priv);
255d0c14 2617err1:
550e5ca7 2618 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
e51060f0
SH
2619 return ret;
2620}
2621EXPORT_SYMBOL(rdma_bind_addr);
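/*
 * A short sketch: binding to a specific local address pins the id to the
 * RDMA device owning that address, while a wildcard bind defers device
 * selection until connect or listen time (address and port illustrative):
 */
#if 0
	struct sockaddr_in src = {
		.sin_family = AF_INET,
		.sin_port = htons(7471),
		.sin_addr.s_addr = in_aton("192.168.1.1"),
	};

	ret = rdma_bind_addr(id, (struct sockaddr *)&src);
#endif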
2622
f4753834 2623static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
e51060f0 2624{
e51060f0 2625 struct cma_hdr *cma_hdr;
e51060f0 2626
01602f11
SH
2627 cma_hdr = hdr;
2628 cma_hdr->cma_version = CMA_VERSION;
f4753834 2629 if (cma_family(id_priv) == AF_INET) {
1f5175ad
AS
2630 struct sockaddr_in *src4, *dst4;
2631
f4753834
SH
2632 src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
2633 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);
1f5175ad 2634
01602f11
SH
2635 cma_set_ip_ver(cma_hdr, 4);
2636 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
2637 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
2638 cma_hdr->port = src4->sin_port;
e8160e15 2639 } else if (cma_family(id_priv) == AF_INET6) {
1f5175ad
AS
2640 struct sockaddr_in6 *src6, *dst6;
2641
f4753834
SH
2642 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
2643 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);
1f5175ad 2644
01602f11
SH
2645 cma_set_ip_ver(cma_hdr, 6);
2646 cma_hdr->src_addr.ip6 = src6->sin6_addr;
2647 cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
2648 cma_hdr->port = src6->sin6_port;
e51060f0
SH
2649 }
2650 return 0;
2651}
2652
628e5f6d
SH
2653static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
2654 struct ib_cm_event *ib_event)
2655{
2656 struct rdma_id_private *id_priv = cm_id->context;
2657 struct rdma_cm_event event;
2658 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
2659 int ret = 0;
2660
550e5ca7 2661 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
8aa08602 2662 return 0;
628e5f6d 2663
8aa08602 2664 memset(&event, 0, sizeof event);
628e5f6d
SH
2665 switch (ib_event->event) {
2666 case IB_CM_SIDR_REQ_ERROR:
2667 event.event = RDMA_CM_EVENT_UNREACHABLE;
2668 event.status = -ETIMEDOUT;
2669 break;
2670 case IB_CM_SIDR_REP_RECEIVED:
2671 event.param.ud.private_data = ib_event->private_data;
2672 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
2673 if (rep->status != IB_SIDR_SUCCESS) {
2674 event.event = RDMA_CM_EVENT_UNREACHABLE;
2675 event.status = ib_event->param.sidr_rep_rcvd.status;
2676 break;
2677 }
5c438135 2678 ret = cma_set_qkey(id_priv, rep->qkey);
d2ca39f2
YE
2679 if (ret) {
2680 event.event = RDMA_CM_EVENT_ADDR_ERROR;
5c438135 2681 event.status = ret;
628e5f6d
SH
2682 break;
2683 }
2684 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
2685 id_priv->id.route.path_rec,
2686 &event.param.ud.ah_attr);
2687 event.param.ud.qp_num = rep->qpn;
2688 event.param.ud.qkey = rep->qkey;
2689 event.event = RDMA_CM_EVENT_ESTABLISHED;
2690 event.status = 0;
2691 break;
2692 default:
468f2239 2693 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
628e5f6d
SH
2694 ib_event->event);
2695 goto out;
2696 }
2697
2698 ret = id_priv->id.event_handler(&id_priv->id, &event);
2699 if (ret) {
2700 /* Destroy the CM ID by returning a non-zero value. */
2701 id_priv->cm_id.ib = NULL;
550e5ca7 2702 cma_exch(id_priv, RDMA_CM_DESTROYING);
de910bd9 2703 mutex_unlock(&id_priv->handler_mutex);
628e5f6d
SH
2704 rdma_destroy_id(&id_priv->id);
2705 return ret;
2706 }
2707out:
de910bd9 2708 mutex_unlock(&id_priv->handler_mutex);
628e5f6d
SH
2709 return ret;
2710}
2711
2712static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
2713 struct rdma_conn_param *conn_param)
2714{
2715 struct ib_cm_sidr_req_param req;
0c9361fc 2716 struct ib_cm_id *id;
e511d1ae 2717 void *private_data;
e8160e15 2718 int offset, ret;
628e5f6d 2719
e511d1ae 2720 memset(&req, 0, sizeof req);
e8160e15
SH
2721 offset = cma_user_data_offset(id_priv);
2722 req.private_data_len = offset + conn_param->private_data_len;
04ded167
SH
2723 if (req.private_data_len < conn_param->private_data_len)
2724 return -EINVAL;
2725
e8160e15 2726 if (req.private_data_len) {
e511d1ae
SH
2727 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
2728 if (!private_data)
e8160e15
SH
2729 return -ENOMEM;
2730 } else {
e511d1ae 2731 private_data = NULL;
e8160e15 2732 }
628e5f6d
SH
2733
2734 if (conn_param->private_data && conn_param->private_data_len)
e511d1ae
SH
2735 memcpy(private_data + offset, conn_param->private_data,
2736 conn_param->private_data_len);
628e5f6d 2737
e511d1ae
SH
2738 if (private_data) {
2739 ret = cma_format_hdr(private_data, id_priv);
e8160e15
SH
2740 if (ret)
2741 goto out;
e511d1ae 2742 req.private_data = private_data;
e8160e15 2743 }
628e5f6d 2744
0c9361fc
JM
2745 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
2746 id_priv);
2747 if (IS_ERR(id)) {
2748 ret = PTR_ERR(id);
628e5f6d
SH
2749 goto out;
2750 }
0c9361fc 2751 id_priv->cm_id.ib = id;
628e5f6d 2752
f4753834 2753 req.path = id_priv->id.route.path_rec;
cf53936f 2754 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
628e5f6d
SH
2755 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
2756 req.max_cm_retries = CMA_MAX_CM_RETRIES;
2757
2758 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
2759 if (ret) {
2760 ib_destroy_cm_id(id_priv->cm_id.ib);
2761 id_priv->cm_id.ib = NULL;
2762 }
2763out:
e511d1ae 2764 kfree(private_data);
628e5f6d
SH
2765 return ret;
2766}
2767
e51060f0
SH
2768static int cma_connect_ib(struct rdma_id_private *id_priv,
2769 struct rdma_conn_param *conn_param)
2770{
2771 struct ib_cm_req_param req;
2772 struct rdma_route *route;
2773 void *private_data;
0c9361fc 2774 struct ib_cm_id *id;
e51060f0
SH
2775 int offset, ret;
2776
2777 memset(&req, 0, sizeof req);
e8160e15 2778 offset = cma_user_data_offset(id_priv);
e51060f0 2779 req.private_data_len = offset + conn_param->private_data_len;
04ded167
SH
2780 if (req.private_data_len < conn_param->private_data_len)
2781 return -EINVAL;
2782
e8160e15
SH
2783 if (req.private_data_len) {
2784 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
2785 if (!private_data)
2786 return -ENOMEM;
2787 } else {
2788 private_data = NULL;
2789 }
e51060f0
SH
2790
2791 if (conn_param->private_data && conn_param->private_data_len)
2792 memcpy(private_data + offset, conn_param->private_data,
2793 conn_param->private_data_len);
2794
0c9361fc
JM
2795 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
2796 if (IS_ERR(id)) {
2797 ret = PTR_ERR(id);
e51060f0
SH
2798 goto out;
2799 }
0c9361fc 2800 id_priv->cm_id.ib = id;
e51060f0
SH
2801
2802 route = &id_priv->id.route;
e8160e15
SH
2803 if (private_data) {
2804 ret = cma_format_hdr(private_data, id_priv);
2805 if (ret)
2806 goto out;
2807 req.private_data = private_data;
2808 }
e51060f0
SH
2809
2810 req.primary_path = &route->path_rec[0];
2811 if (route->num_paths == 2)
2812 req.alternate_path = &route->path_rec[1];
2813
cf53936f 2814 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
e51060f0 2815 req.qp_num = id_priv->qp_num;
18c441a6 2816 req.qp_type = id_priv->id.qp_type;
e51060f0
SH
2817 req.starting_psn = id_priv->seq_num;
2818 req.responder_resources = conn_param->responder_resources;
2819 req.initiator_depth = conn_param->initiator_depth;
2820 req.flow_control = conn_param->flow_control;
4ede178a
SH
2821 req.retry_count = min_t(u8, 7, conn_param->retry_count);
2822 req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
e51060f0
SH
2823 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
2824 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
2825 req.max_cm_retries = CMA_MAX_CM_RETRIES;
2826 req.srq = id_priv->srq ? 1 : 0;
2827
2828 ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
2829out:
0c9361fc
JM
2830 if (ret && !IS_ERR(id)) {
2831 ib_destroy_cm_id(id);
675a027c
KK
2832 id_priv->cm_id.ib = NULL;
2833 }
2834
e51060f0
SH
2835 kfree(private_data);
2836 return ret;
2837}
2838
07ebafba
TT
2839static int cma_connect_iw(struct rdma_id_private *id_priv,
2840 struct rdma_conn_param *conn_param)
2841{
2842 struct iw_cm_id *cm_id;
07ebafba
TT
2843 int ret;
2844 struct iw_cm_conn_param iw_param;
2845
2846 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
0c9361fc
JM
2847 if (IS_ERR(cm_id))
2848 return PTR_ERR(cm_id);
07ebafba
TT
2849
2850 id_priv->cm_id.iw = cm_id;
2851
24d44a39
SW
2852 memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
2853 rdma_addr_size(cma_src_addr(id_priv)));
2854 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
2855 rdma_addr_size(cma_dst_addr(id_priv)));
07ebafba 2856
5851bb89 2857 ret = cma_modify_qp_rtr(id_priv, conn_param);
675a027c
KK
2858 if (ret)
2859 goto out;
07ebafba 2860
f45ee80e
HS
2861 if (conn_param) {
2862 iw_param.ord = conn_param->initiator_depth;
2863 iw_param.ird = conn_param->responder_resources;
2864 iw_param.private_data = conn_param->private_data;
2865 iw_param.private_data_len = conn_param->private_data_len;
2866 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
2867 } else {
2868 memset(&iw_param, 0, sizeof iw_param);
07ebafba 2869 iw_param.qpn = id_priv->qp_num;
f45ee80e 2870 }
07ebafba
TT
2871 ret = iw_cm_connect(cm_id, &iw_param);
2872out:
0c9361fc 2873 if (ret) {
675a027c
KK
2874 iw_destroy_cm_id(cm_id);
2875 id_priv->cm_id.iw = NULL;
2876 }
07ebafba
TT
2877 return ret;
2878}
2879
e51060f0
SH
2880int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2881{
2882 struct rdma_id_private *id_priv;
2883 int ret;
2884
2885 id_priv = container_of(id, struct rdma_id_private, id);
550e5ca7 2886 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
e51060f0
SH
2887 return -EINVAL;
2888
2889 if (!id->qp) {
2890 id_priv->qp_num = conn_param->qp_num;
e51060f0
SH
2891 id_priv->srq = conn_param->srq;
2892 }
2893
07ebafba
TT
2894 switch (rdma_node_get_transport(id->device->node_type)) {
2895 case RDMA_TRANSPORT_IB:
b26f9b99 2896 if (id->qp_type == IB_QPT_UD)
628e5f6d
SH
2897 ret = cma_resolve_ib_udp(id_priv, conn_param);
2898 else
2899 ret = cma_connect_ib(id_priv, conn_param);
e51060f0 2900 break;
07ebafba
TT
2901 case RDMA_TRANSPORT_IWARP:
2902 ret = cma_connect_iw(id_priv, conn_param);
2903 break;
e51060f0
SH
2904 default:
2905 ret = -ENOSYS;
2906 break;
2907 }
2908 if (ret)
2909 goto err;
2910
2911 return 0;
2912err:
550e5ca7 2913 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
e51060f0
SH
2914 return ret;
2915}
2916EXPORT_SYMBOL(rdma_connect);
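/*
 * A minimal sketch of initiating the connection once the route is
 * resolved and a QP has been created on the id; the parameter values are
 * illustrative and are capped by the device limits:
 */
#if 0
	struct rdma_conn_param conn_param = {
		.responder_resources = 1,
		.initiator_depth = 1,
		.retry_count = 7,
		.rnr_retry_count = 7,
	};

	ret = rdma_connect(id, &conn_param);
#endif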
2917
2918static int cma_accept_ib(struct rdma_id_private *id_priv,
2919 struct rdma_conn_param *conn_param)
2920{
2921 struct ib_cm_rep_param rep;
5851bb89 2922 int ret;
0fe313b0 2923
5851bb89
SH
2924 ret = cma_modify_qp_rtr(id_priv, conn_param);
2925 if (ret)
2926 goto out;
0fe313b0 2927
5851bb89
SH
2928 ret = cma_modify_qp_rts(id_priv, conn_param);
2929 if (ret)
2930 goto out;
e51060f0
SH
2931
2932 memset(&rep, 0, sizeof rep);
2933 rep.qp_num = id_priv->qp_num;
2934 rep.starting_psn = id_priv->seq_num;
2935 rep.private_data = conn_param->private_data;
2936 rep.private_data_len = conn_param->private_data_len;
2937 rep.responder_resources = conn_param->responder_resources;
2938 rep.initiator_depth = conn_param->initiator_depth;
e51060f0
SH
2939 rep.failover_accepted = 0;
2940 rep.flow_control = conn_param->flow_control;
4ede178a 2941 rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
e51060f0
SH
2942 rep.srq = id_priv->srq ? 1 : 0;
2943
0fe313b0
SH
2944 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
2945out:
2946 return ret;
e51060f0
SH
2947}
2948
07ebafba
TT
2949static int cma_accept_iw(struct rdma_id_private *id_priv,
2950 struct rdma_conn_param *conn_param)
2951{
2952 struct iw_cm_conn_param iw_param;
2953 int ret;
2954
5851bb89 2955 ret = cma_modify_qp_rtr(id_priv, conn_param);
07ebafba
TT
2956 if (ret)
2957 return ret;
2958
2959 iw_param.ord = conn_param->initiator_depth;
2960 iw_param.ird = conn_param->responder_resources;
2961 iw_param.private_data = conn_param->private_data;
2962 iw_param.private_data_len = conn_param->private_data_len;
2963 if (id_priv->id.qp)
2964 iw_param.qpn = id_priv->qp_num;
2965 else
2966 iw_param.qpn = conn_param->qp_num;
2967
2968 return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
2969}
2970
628e5f6d 2971static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
5c438135 2972 enum ib_cm_sidr_status status, u32 qkey,
628e5f6d
SH
2973 const void *private_data, int private_data_len)
2974{
2975 struct ib_cm_sidr_rep_param rep;
d2ca39f2 2976 int ret;
628e5f6d
SH
2977
2978 memset(&rep, 0, sizeof rep);
2979 rep.status = status;
2980 if (status == IB_SIDR_SUCCESS) {
5c438135 2981 ret = cma_set_qkey(id_priv, qkey);
d2ca39f2
YE
2982 if (ret)
2983 return ret;
628e5f6d 2984 rep.qp_num = id_priv->qp_num;
c8f6a362 2985 rep.qkey = id_priv->qkey;
628e5f6d
SH
2986 }
2987 rep.private_data = private_data;
2988 rep.private_data_len = private_data_len;
2989
2990 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
2991}
2992
e51060f0
SH
2993int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2994{
2995 struct rdma_id_private *id_priv;
2996 int ret;
2997
2998 id_priv = container_of(id, struct rdma_id_private, id);
83e9502d
NM
2999
3000 id_priv->owner = task_pid_nr(current);
3001
550e5ca7 3002 if (!cma_comp(id_priv, RDMA_CM_CONNECT))
e51060f0
SH
3003 return -EINVAL;
3004
3005 if (!id->qp && conn_param) {
3006 id_priv->qp_num = conn_param->qp_num;
e51060f0
SH
3007 id_priv->srq = conn_param->srq;
3008 }
3009
07ebafba
TT
3010 switch (rdma_node_get_transport(id->device->node_type)) {
3011 case RDMA_TRANSPORT_IB:
f45ee80e
HS
3012 if (id->qp_type == IB_QPT_UD) {
3013 if (conn_param)
3014 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
5c438135 3015 conn_param->qkey,
f45ee80e
HS
3016 conn_param->private_data,
3017 conn_param->private_data_len);
3018 else
3019 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
5c438135 3020 0, NULL, 0);
f45ee80e
HS
3021 } else {
3022 if (conn_param)
3023 ret = cma_accept_ib(id_priv, conn_param);
3024 else
3025 ret = cma_rep_recv(id_priv);
3026 }
e51060f0 3027 break;
07ebafba
TT
3028 case RDMA_TRANSPORT_IWARP:
3029 ret = cma_accept_iw(id_priv, conn_param);
3030 break;
e51060f0
SH
3031 default:
3032 ret = -ENOSYS;
3033 break;
3034 }
3035
3036 if (ret)
3037 goto reject;
3038
3039 return 0;
3040reject:
c5483388 3041 cma_modify_qp_err(id_priv);
e51060f0
SH
3042 rdma_reject(id, NULL, 0);
3043 return ret;
3044}
3045EXPORT_SYMBOL(rdma_accept);
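/*
 * A minimal passive-side sketch: rdma_accept() normally runs inside the
 * listen handler on RDMA_CM_EVENT_CONNECT_REQUEST, against the new child
 * id the CMA passes to the callback.  my_listen_handler is a hypothetical
 * consumer callback.
 */
#if 0
static int my_listen_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rdma_conn_param conn_param;

	if (event->event != RDMA_CM_EVENT_CONNECT_REQUEST)
		return 0;

	/* create PD, CQ and QP against id->device here, then accept */
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = event->param.conn.responder_resources;
	conn_param.initiator_depth = event->param.conn.initiator_depth;
	return rdma_accept(id, &conn_param);	/* non-zero return destroys id */
}
#endif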
3046
0fe313b0
SH
3047int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
3048{
3049 struct rdma_id_private *id_priv;
3050 int ret;
3051
3052 id_priv = container_of(id, struct rdma_id_private, id);
0c9361fc 3053 if (!id_priv->cm_id.ib)
0fe313b0
SH
3054 return -EINVAL;
3055
3056 switch (id->device->node_type) {
3057 case RDMA_NODE_IB_CA:
3058 ret = ib_cm_notify(id_priv->cm_id.ib, event);
3059 break;
3060 default:
3061 ret = 0;
3062 break;
3063 }
3064 return ret;
3065}
3066EXPORT_SYMBOL(rdma_notify);
3067
e51060f0
SH
3068int rdma_reject(struct rdma_cm_id *id, const void *private_data,
3069 u8 private_data_len)
3070{
3071 struct rdma_id_private *id_priv;
3072 int ret;
3073
3074 id_priv = container_of(id, struct rdma_id_private, id);
0c9361fc 3075 if (!id_priv->cm_id.ib)
e51060f0
SH
3076 return -EINVAL;
3077
07ebafba
TT
3078 switch (rdma_node_get_transport(id->device->node_type)) {
3079 case RDMA_TRANSPORT_IB:
b26f9b99 3080 if (id->qp_type == IB_QPT_UD)
5c438135 3081 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
628e5f6d
SH
3082 private_data, private_data_len);
3083 else
3084 ret = ib_send_cm_rej(id_priv->cm_id.ib,
3085 IB_CM_REJ_CONSUMER_DEFINED, NULL,
3086 0, private_data, private_data_len);
e51060f0 3087 break;
07ebafba
TT
3088 case RDMA_TRANSPORT_IWARP:
3089 ret = iw_cm_reject(id_priv->cm_id.iw,
3090 private_data, private_data_len);
3091 break;
e51060f0
SH
3092 default:
3093 ret = -ENOSYS;
3094 break;
3095 }
3096 return ret;
3097}
3098EXPORT_SYMBOL(rdma_reject);
3099
3100int rdma_disconnect(struct rdma_cm_id *id)
3101{
3102 struct rdma_id_private *id_priv;
3103 int ret;
3104
3105 id_priv = container_of(id, struct rdma_id_private, id);
0c9361fc 3106 if (!id_priv->cm_id.ib)
e51060f0
SH
3107 return -EINVAL;
3108
07ebafba
TT
3109 switch (rdma_node_get_transport(id->device->node_type)) {
3110 case RDMA_TRANSPORT_IB:
c5483388 3111 ret = cma_modify_qp_err(id_priv);
07ebafba
TT
3112 if (ret)
3113 goto out;
e51060f0
SH
3114 /* Initiate or respond to a disconnect. */
3115 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
3116 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
3117 break;
07ebafba
TT
3118 case RDMA_TRANSPORT_IWARP:
3119 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
3120 break;
e51060f0 3121 default:
07ebafba 3122 ret = -EINVAL;
e51060f0
SH
3123 break;
3124 }
3125out:
3126 return ret;
3127}
3128EXPORT_SYMBOL(rdma_disconnect);
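/*
 * A short sketch: either side may initiate teardown; the remote peer sees
 * RDMA_CM_EVENT_DISCONNECTED and commonly mirrors the call from its
 * handler:
 */
#if 0
	case RDMA_CM_EVENT_DISCONNECTED:
		rdma_disconnect(id);	/* answers the DREQ, moves the QP to error */
		break;
#endif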
3129
c8f6a362
SH
3130static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
3131{
3132 struct rdma_id_private *id_priv;
3133 struct cma_multicast *mc = multicast->context;
3134 struct rdma_cm_event event;
3135 int ret;
3136
3137 id_priv = mc->id_priv;
550e5ca7
NM
3138 if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
3139 cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
8aa08602 3140 return 0;
c8f6a362 3141
5c438135
SH
3142 if (!status)
3143 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
c5483388 3144 mutex_lock(&id_priv->qp_mutex);
c8f6a362
SH
3145 if (!status && id_priv->id.qp)
3146 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
46ea5061 3147 be16_to_cpu(multicast->rec.mlid));
c5483388 3148 mutex_unlock(&id_priv->qp_mutex);
c8f6a362
SH
3149
3150 memset(&event, 0, sizeof event);
3151 event.status = status;
3152 event.param.ud.private_data = mc->context;
3153 if (!status) {
3154 event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
3155 ib_init_ah_from_mcmember(id_priv->id.device,
3156 id_priv->id.port_num, &multicast->rec,
3157 &event.param.ud.ah_attr);
3158 event.param.ud.qp_num = 0xFFFFFF;
3159 event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
3160 } else
3161 event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
3162
3163 ret = id_priv->id.event_handler(&id_priv->id, &event);
3164 if (ret) {
550e5ca7 3165 cma_exch(id_priv, RDMA_CM_DESTROYING);
de910bd9 3166 mutex_unlock(&id_priv->handler_mutex);
c8f6a362
SH
3167 rdma_destroy_id(&id_priv->id);
3168 return 0;
3169 }
8aa08602 3170
de910bd9 3171 mutex_unlock(&id_priv->handler_mutex);
c8f6a362
SH
3172 return 0;
3173}
3174
3175static void cma_set_mgid(struct rdma_id_private *id_priv,
3176 struct sockaddr *addr, union ib_gid *mgid)
3177{
3178 unsigned char mc_map[MAX_ADDR_LEN];
3179 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
3180 struct sockaddr_in *sin = (struct sockaddr_in *) addr;
3181 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;
3182
3183 if (cma_any_addr(addr)) {
3184 memset(mgid, 0, sizeof *mgid);
3185 } else if ((addr->sa_family == AF_INET6) &&
1c9b2819 3186 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
c8f6a362
SH
3187 0xFF10A01B)) {
3188 /* IPv6 address is an SA assigned MGID. */
3189 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
5bc2b7b3
SH
3190 } else if (addr->sa_family == AF_IB) {
3191 memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
e2e62697
JG
3192 } else if (addr->sa_family == AF_INET6) {
3193 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
3194 if (id_priv->id.ps == RDMA_PS_UDP)
3195 mc_map[7] = 0x01; /* Use RDMA CM signature */
3196 *mgid = *(union ib_gid *) (mc_map + 4);
c8f6a362 3197 } else {
a9e527e3 3198 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
c8f6a362
SH
3199 if (id_priv->id.ps == RDMA_PS_UDP)
3200 mc_map[7] = 0x01; /* Use RDMA CM signature */
c8f6a362
SH
3201 *mgid = *(union ib_gid *) (mc_map + 4);
3202 }
3203}
3204
3205static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
3206 struct cma_multicast *mc)
3207{
3208 struct ib_sa_mcmember_rec rec;
3209 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
3210 ib_sa_comp_mask comp_mask;
3211 int ret;
3212
3213 ib_addr_get_mgid(dev_addr, &rec.mgid);
3214 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
3215 &rec.mgid, &rec);
3216 if (ret)
3217 return ret;
3218
5bc2b7b3
SH
3219 ret = cma_set_qkey(id_priv, 0);
3220 if (ret)
3221 return ret;
3222
3f446754 3223 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
5bc2b7b3 3224 rec.qkey = cpu_to_be32(id_priv->qkey);
6f8372b6 3225 rdma_addr_get_sgid(dev_addr, &rec.port_gid);
c8f6a362
SH
3226 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
3227 rec.join_state = 1;
3228
3229 comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
3230 IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
3231 IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
3232 IB_SA_MCMEMBER_REC_FLOW_LABEL |
3233 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
3234
84adeee9
YE
3235 if (id_priv->id.ps == RDMA_PS_IPOIB)
3236 comp_mask |= IB_SA_MCMEMBER_REC_RATE |
2a22fb8c
DB
3237 IB_SA_MCMEMBER_REC_RATE_SELECTOR |
3238 IB_SA_MCMEMBER_REC_MTU_SELECTOR |
3239 IB_SA_MCMEMBER_REC_MTU |
3240 IB_SA_MCMEMBER_REC_HOP_LIMIT;
84adeee9 3241
c8f6a362
SH
3242 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
3243 id_priv->id.port_num, &rec,
3244 comp_mask, GFP_KERNEL,
3245 cma_ib_mc_handler, mc);
8c6ffba0 3246 return PTR_ERR_OR_ZERO(mc->multicast.ib);
c8f6a362
SH
3247}
3248
3c86aa70
EC
3249static void iboe_mcast_work_handler(struct work_struct *work)
3250{
3251 struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
3252 struct cma_multicast *mc = mw->mc;
3253 struct ib_sa_multicast *m = mc->multicast.ib;
3254
3255 mc->multicast.ib->context = mc;
3256 cma_ib_mc_handler(0, m);
3257 kref_put(&mc->mcref, release_mc);
3258 kfree(mw);
3259}
3260
3261static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
3262{
3263 struct sockaddr_in *sin = (struct sockaddr_in *)addr;
3264 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
3265
3266 if (cma_any_addr(addr)) {
3267 memset(mgid, 0, sizeof *mgid);
3268 } else if (addr->sa_family == AF_INET6) {
3269 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
3270 } else {
3271 mgid->raw[0] = 0xff;
3272 mgid->raw[1] = 0x0e;
3273 mgid->raw[2] = 0;
3274 mgid->raw[3] = 0;
3275 mgid->raw[4] = 0;
3276 mgid->raw[5] = 0;
3277 mgid->raw[6] = 0;
3278 mgid->raw[7] = 0;
3279 mgid->raw[8] = 0;
3280 mgid->raw[9] = 0;
3281 mgid->raw[10] = 0xff;
3282 mgid->raw[11] = 0xff;
3283 *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
3284 }
3285}
3286
3287static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
3288 struct cma_multicast *mc)
3289{
3290 struct iboe_mcast_work *work;
3291 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
3292 int err;
3293 struct sockaddr *addr = (struct sockaddr *)&mc->addr;
3294 struct net_device *ndev = NULL;
3295
3296 if (cma_zero_addr((struct sockaddr *)&mc->addr))
3297 return -EINVAL;
3298
3299 work = kzalloc(sizeof *work, GFP_KERNEL);
3300 if (!work)
3301 return -ENOMEM;
3302
3303 mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
3304 if (!mc->multicast.ib) {
3305 err = -ENOMEM;
3306 goto out1;
3307 }
3308
3309 cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);
3310
3311 mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
3312 if (id_priv->id.ps == RDMA_PS_UDP)
3313 mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
3314
3315 if (dev_addr->bound_dev_if)
3316 ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
3317 if (!ndev) {
3318 err = -ENODEV;
3319 goto out2;
3320 }
3321 mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
3322 mc->multicast.ib->rec.hop_limit = 1;
3323 mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
3324 dev_put(ndev);
3325 if (!mc->multicast.ib->rec.mtu) {
3326 err = -EINVAL;
3327 goto out2;
3328 }
7b85627b
MS
3329 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
3330 &mc->multicast.ib->rec.port_gid);
3c86aa70
EC
3331 work->id = id_priv;
3332 work->mc = mc;
3333 INIT_WORK(&work->work, iboe_mcast_work_handler);
3334 kref_get(&mc->mcref);
3335 queue_work(cma_wq, &work->work);
3336
3337 return 0;
3338
3339out2:
3340 kfree(mc->multicast.ib);
3341out1:
3342 kfree(work);
3343 return err;
3344}
3345
c8f6a362
SH
3346int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
3347 void *context)
3348{
3349 struct rdma_id_private *id_priv;
3350 struct cma_multicast *mc;
3351 int ret;
3352
3353 id_priv = container_of(id, struct rdma_id_private, id);
550e5ca7
NM
3354 if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
3355 !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
c8f6a362
SH
3356 return -EINVAL;
3357
3358 mc = kmalloc(sizeof *mc, GFP_KERNEL);
3359 if (!mc)
3360 return -ENOMEM;
3361
ef560861 3362 memcpy(&mc->addr, addr, rdma_addr_size(addr));
c8f6a362
SH
3363 mc->context = context;
3364 mc->id_priv = id_priv;
3365
3366 spin_lock(&id_priv->lock);
3367 list_add(&mc->list, &id_priv->mc_list);
3368 spin_unlock(&id_priv->lock);
3369
3370 switch (rdma_node_get_transport(id->device->node_type)) {
3371 case RDMA_TRANSPORT_IB:
3c86aa70
EC
3372 switch (rdma_port_get_link_layer(id->device, id->port_num)) {
3373 case IB_LINK_LAYER_INFINIBAND:
3374 ret = cma_join_ib_multicast(id_priv, mc);
3375 break;
3376 case IB_LINK_LAYER_ETHERNET:
3377 kref_init(&mc->mcref);
3378 ret = cma_iboe_join_multicast(id_priv, mc);
3379 break;
3380 default:
3381 ret = -EINVAL;
3382 }
c8f6a362
SH
3383 break;
3384 default:
3385 ret = -ENOSYS;
3386 break;
3387 }
3388
3389 if (ret) {
3390 spin_lock_irq(&id_priv->lock);
3391 list_del(&mc->list);
3392 spin_unlock_irq(&id_priv->lock);
3393 kfree(mc);
3394 }
3395 return ret;
3396}
3397EXPORT_SYMBOL(rdma_join_multicast);
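/*
 * A minimal UD-side sketch: the join is asynchronous and completes with
 * RDMA_CM_EVENT_MULTICAST_JOIN, whose param.ud carries the AH attributes
 * and qkey for sends to the group; the group address is illustrative.
 */
#if 0
	struct sockaddr_in mcast = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = in_aton("239.1.1.1"),
	};

	ret = rdma_join_multicast(id, (struct sockaddr *)&mcast, my_ctx);

	/* and on teardown: */
	rdma_leave_multicast(id, (struct sockaddr *)&mcast);
#endif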
3398
3399void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
3400{
3401 struct rdma_id_private *id_priv;
3402 struct cma_multicast *mc;
3403
3404 id_priv = container_of(id, struct rdma_id_private, id);
3405 spin_lock_irq(&id_priv->lock);
3406 list_for_each_entry(mc, &id_priv->mc_list, list) {
ef560861 3407 if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
c8f6a362
SH
3408 list_del(&mc->list);
3409 spin_unlock_irq(&id_priv->lock);
3410
3411 if (id->qp)
3412 ib_detach_mcast(id->qp,
3413 &mc->multicast.ib->rec.mgid,
46ea5061 3414 be16_to_cpu(mc->multicast.ib->rec.mlid));
3c86aa70
EC
3415 if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
3416 switch (rdma_port_get_link_layer(id->device, id->port_num)) {
3417 case IB_LINK_LAYER_INFINIBAND:
3418 ib_sa_free_multicast(mc->multicast.ib);
3419 kfree(mc);
3420 break;
3421 case IB_LINK_LAYER_ETHERNET:
3422 kref_put(&mc->mcref, release_mc);
3423 break;
3424 default:
3425 break;
3426 }
3427 }
c8f6a362
SH
3428 return;
3429 }
3430 }
3431 spin_unlock_irq(&id_priv->lock);
3432}
3433EXPORT_SYMBOL(rdma_leave_multicast);
3434
dd5bdff8
OG
3435static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
3436{
3437 struct rdma_dev_addr *dev_addr;
3438 struct cma_ndev_work *work;
3439
3440 dev_addr = &id_priv->id.route.addr.dev_addr;
3441
6266ed6e 3442 if ((dev_addr->bound_dev_if == ndev->ifindex) &&
dd5bdff8
OG
3443 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
3444 printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
3445 ndev->name, &id_priv->id);
3446 work = kzalloc(sizeof *work, GFP_KERNEL);
3447 if (!work)
3448 return -ENOMEM;
3449
3450 INIT_WORK(&work->work, cma_ndev_work_handler);
3451 work->id = id_priv;
3452 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
3453 atomic_inc(&id_priv->refcount);
3454 queue_work(cma_wq, &work->work);
3455 }
3456
3457 return 0;
3458}
3459
3460static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
351638e7 3461 void *ptr)
dd5bdff8 3462{
351638e7 3463 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
dd5bdff8
OG
3464 struct cma_device *cma_dev;
3465 struct rdma_id_private *id_priv;
3466 int ret = NOTIFY_DONE;
3467
3468 if (dev_net(ndev) != &init_net)
3469 return NOTIFY_DONE;
3470
3471 if (event != NETDEV_BONDING_FAILOVER)
3472 return NOTIFY_DONE;
3473
3474 if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
3475 return NOTIFY_DONE;
3476
3477 mutex_lock(&lock);
3478 list_for_each_entry(cma_dev, &dev_list, list)
3479 list_for_each_entry(id_priv, &cma_dev->id_list, list) {
3480 ret = cma_netdev_change(ndev, id_priv);
3481 if (ret)
3482 goto out;
3483 }
3484
3485out:
3486 mutex_unlock(&lock);
3487 return ret;
3488}
3489
3490static struct notifier_block cma_nb = {
3491 .notifier_call = cma_netdev_callback
3492};
3493
e51060f0
SH
3494static void cma_add_one(struct ib_device *device)
3495{
3496 struct cma_device *cma_dev;
3497 struct rdma_id_private *id_priv;
3498
3499 cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
3500 if (!cma_dev)
3501 return;
3502
3503 cma_dev->device = device;
e51060f0
SH
3504
3505 init_completion(&cma_dev->comp);
3506 atomic_set(&cma_dev->refcount, 1);
3507 INIT_LIST_HEAD(&cma_dev->id_list);
3508 ib_set_client_data(device, &cma_client, cma_dev);
3509
3510 mutex_lock(&lock);
3511 list_add_tail(&cma_dev->list, &dev_list);
3512 list_for_each_entry(id_priv, &listen_any_list, list)
3513 cma_listen_on_dev(id_priv, cma_dev);
3514 mutex_unlock(&lock);
e51060f0
SH
3515}
3516
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum rdma_cm_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
	if (state == RDMA_CM_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

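/*
 * Return-value contract: a non-zero return from the consumer's
 * RDMA_CM_EVENT_DEVICE_REMOVAL handler tells cma_process_remove()
 * below to call rdma_destroy_id() on the consumer's behalf; returning
 * zero means the consumer takes responsibility for destroying the ID
 * itself.
 */
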
static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

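/*
 * cma_process_remove() relies on the usual refcount-plus-completion
 * idiom: cma_dev->refcount starts at 1 (set in cma_add_one()), IDs
 * attached to the device hold additional references, and the final
 * cma_deref_dev() completes cma_dev->comp, so wait_for_completion()
 * cannot return while any ID still references the device.
 */
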
static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}

static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct rdma_cm_id_stats *id_stats;
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id = NULL;
	struct cma_device *cma_dev;
	int i_dev = 0, i_id = 0;

	/*
	 * We export all of the IDs as a sequence of messages.  Each
	 * ID gets its own netlink message.
	 */
	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list) {
		if (i_dev < cb->args[0]) {
			i_dev++;
			continue;
		}

		i_id = 0;
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			if (i_id < cb->args[1]) {
				i_id++;
				continue;
			}

			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
						sizeof *id_stats, RDMA_NL_RDMA_CM,
						RDMA_NL_RDMA_CM_ID_STATS,
						NLM_F_MULTI);
			if (!id_stats)
				goto out;

			memset(id_stats, 0, sizeof *id_stats);
			id = &id_priv->id;
			id_stats->node_type = id->route.addr.dev_addr.dev_type;
			id_stats->port_num = id->port_num;
			id_stats->bound_dev_if =
				id->route.addr.dev_addr.bound_dev_if;

			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_src_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
				goto out;
			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_dst_addr(id_priv)),
					  cma_dst_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
				goto out;

			id_stats->pid = id_priv->owner;
			id_stats->port_space = id->ps;
			id_stats->cm_state = id_priv->state;
			id_stats->qp_num = id_priv->qp_num;
			id_stats->qp_type = id->qp_type;

			i_id++;
		}

		cb->args[1] = 0;
		i_dev++;
	}

out:
	mutex_unlock(&lock);
	cb->args[0] = i_dev;
	cb->args[1] = i_id;

	return skb->len;
}

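/*
 * cma_get_id_stats() follows the standard netlink dump pattern: the
 * core calls it repeatedly until a pass adds nothing to the skb, and
 * cb->args[0]/cb->args[1] persist between calls to record how many
 * devices and per-device IDs were already emitted, so each pass
 * resumes where the previous one stopped.
 */
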
static const struct ibnl_client_cbs cma_cb_table[] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
				       .module = THIS_MODULE },
};

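/*
 * The table above is indexed by netlink operation: registering it via
 * ibnl_add_client() in cma_init() below routes RDMA_NL_RDMA_CM_ID_STATS
 * dump requests to cma_get_id_stats().
 */
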
static int __init cma_init(void)
{
	int ret;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
		printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");

	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}

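/*
 * Ordering matters in cma_init(): the workqueue, SA client, address
 * client and netdev notifier must all exist before ib_register_client(),
 * because cma_add_one() can run for already-present devices during that
 * call.  The err path above and cma_cleanup() below unwind in reverse
 * registration order.
 */
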
static void __exit cma_cleanup(void)
{
	ibnl_remove_client(RDMA_NL_RDMA_CM);
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
	idr_destroy(&ib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);