IB/cma: Add net_dev and private data checks to RDMA CM
[deliverable/linux.git] / drivers / infiniband / core / cma.c
CommitLineData
e51060f0
SH
1/*
2 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
3 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
4 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
5 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
6 *
a9474917
SH
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
e51060f0 12 *
a9474917
SH
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
e51060f0 16 *
a9474917
SH
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
e51060f0 20 *
a9474917
SH
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
e51060f0 25 *
a9474917
SH
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
e51060f0
SH
34 */
35
36#include <linux/completion.h>
37#include <linux/in.h>
38#include <linux/in6.h>
39#include <linux/mutex.h>
40#include <linux/random.h>
41#include <linux/idr.h>
07ebafba 42#include <linux/inetdevice.h>
5a0e3ad6 43#include <linux/slab.h>
e4dd23d7 44#include <linux/module.h>
366cddb4 45#include <net/route.h>
e51060f0
SH
46
47#include <net/tcp.h>
1f5175ad 48#include <net/ipv6.h>
e51060f0
SH
49
50#include <rdma/rdma_cm.h>
51#include <rdma/rdma_cm_ib.h>
753f618a 52#include <rdma/rdma_netlink.h>
2e2d190c 53#include <rdma/ib.h>
e51060f0
SH
54#include <rdma/ib_cache.h>
55#include <rdma/ib_cm.h>
56#include <rdma/ib_sa.h>
07ebafba 57#include <rdma/iw_cm.h>
e51060f0
SH
58
59MODULE_AUTHOR("Sean Hefty");
60MODULE_DESCRIPTION("Generic RDMA CM Agent");
61MODULE_LICENSE("Dual BSD/GPL");
62
63#define CMA_CM_RESPONSE_TIMEOUT 20
d5bb7599 64#define CMA_MAX_CM_RETRIES 15
dcb3f974 65#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
3c86aa70 66#define CMA_IBOE_PACKET_LIFETIME 18
e51060f0 67
2b1b5b60
SG
68static const char * const cma_events[] = {
69 [RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved",
70 [RDMA_CM_EVENT_ADDR_ERROR] = "address error",
71 [RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved ",
72 [RDMA_CM_EVENT_ROUTE_ERROR] = "route error",
73 [RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request",
74 [RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
75 [RDMA_CM_EVENT_CONNECT_ERROR] = "connect error",
76 [RDMA_CM_EVENT_UNREACHABLE] = "unreachable",
77 [RDMA_CM_EVENT_REJECTED] = "rejected",
78 [RDMA_CM_EVENT_ESTABLISHED] = "established",
79 [RDMA_CM_EVENT_DISCONNECTED] = "disconnected",
80 [RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal",
81 [RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join",
82 [RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error",
83 [RDMA_CM_EVENT_ADDR_CHANGE] = "address change",
84 [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
85};
86
87const char *rdma_event_msg(enum rdma_cm_event_type event)
88{
89 size_t index = event;
90
91 return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
92 cma_events[index] : "unrecognized event";
93}
94EXPORT_SYMBOL(rdma_event_msg);
95
e51060f0 96static void cma_add_one(struct ib_device *device);
7c1eb45a 97static void cma_remove_one(struct ib_device *device, void *client_data);
e51060f0
SH
98
99static struct ib_client cma_client = {
100 .name = "cma",
101 .add = cma_add_one,
102 .remove = cma_remove_one
103};
104
c1a0b23b 105static struct ib_sa_client sa_client;
7a118df3 106static struct rdma_addr_client addr_client;
e51060f0
SH
107static LIST_HEAD(dev_list);
108static LIST_HEAD(listen_any_list);
109static DEFINE_MUTEX(lock);
110static struct workqueue_struct *cma_wq;
e51060f0 111static DEFINE_IDR(tcp_ps);
628e5f6d 112static DEFINE_IDR(udp_ps);
c8f6a362 113static DEFINE_IDR(ipoib_ps);
2d2e9415 114static DEFINE_IDR(ib_ps);
e51060f0 115
aac978e1
HE
116static struct idr *cma_idr(enum rdma_port_space ps)
117{
118 switch (ps) {
119 case RDMA_PS_TCP:
120 return &tcp_ps;
121 case RDMA_PS_UDP:
122 return &udp_ps;
123 case RDMA_PS_IPOIB:
124 return &ipoib_ps;
125 case RDMA_PS_IB:
126 return &ib_ps;
127 default:
128 return NULL;
129 }
130}
131
e51060f0
SH
132struct cma_device {
133 struct list_head list;
134 struct ib_device *device;
e51060f0
SH
135 struct completion comp;
136 atomic_t refcount;
137 struct list_head id_list;
138};
139
e51060f0 140struct rdma_bind_list {
aac978e1 141 enum rdma_port_space ps;
e51060f0
SH
142 struct hlist_head owners;
143 unsigned short port;
144};
145
aac978e1
HE
146static int cma_ps_alloc(enum rdma_port_space ps,
147 struct rdma_bind_list *bind_list, int snum)
148{
149 struct idr *idr = cma_idr(ps);
150
151 return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL);
152}
153
154static struct rdma_bind_list *cma_ps_find(enum rdma_port_space ps, int snum)
155{
156 struct idr *idr = cma_idr(ps);
157
158 return idr_find(idr, snum);
159}
160
161static void cma_ps_remove(enum rdma_port_space ps, int snum)
162{
163 struct idr *idr = cma_idr(ps);
164
165 idr_remove(idr, snum);
166}
167
68602120
SH
168enum {
169 CMA_OPTION_AFONLY,
170};
171
e51060f0
SH
172/*
173 * Device removal can occur at anytime, so we need extra handling to
174 * serialize notifying the user of device removal with other callbacks.
175 * We do this by disabling removal notification while a callback is in process,
176 * and reporting it after the callback completes.
177 */
178struct rdma_id_private {
179 struct rdma_cm_id id;
180
181 struct rdma_bind_list *bind_list;
182 struct hlist_node node;
d02d1f53
SH
183 struct list_head list; /* listen_any_list or cma_device.list */
184 struct list_head listen_list; /* per device listens */
e51060f0 185 struct cma_device *cma_dev;
c8f6a362 186 struct list_head mc_list;
e51060f0 187
d02d1f53 188 int internal_id;
550e5ca7 189 enum rdma_cm_state state;
e51060f0 190 spinlock_t lock;
c5483388
SH
191 struct mutex qp_mutex;
192
e51060f0
SH
193 struct completion comp;
194 atomic_t refcount;
de910bd9 195 struct mutex handler_mutex;
e51060f0
SH
196
197 int backlog;
198 int timeout_ms;
199 struct ib_sa_query *query;
200 int query_id;
201 union {
202 struct ib_cm_id *ib;
07ebafba 203 struct iw_cm_id *iw;
e51060f0
SH
204 } cm_id;
205
206 u32 seq_num;
c8f6a362 207 u32 qkey;
e51060f0 208 u32 qp_num;
83e9502d 209 pid_t owner;
68602120 210 u32 options;
e51060f0 211 u8 srq;
a81c994d 212 u8 tos;
a9bb7912 213 u8 reuseaddr;
5b0ec991 214 u8 afonly;
e51060f0
SH
215};
216
c8f6a362
SH
217struct cma_multicast {
218 struct rdma_id_private *id_priv;
219 union {
220 struct ib_sa_multicast *ib;
221 } multicast;
222 struct list_head list;
223 void *context;
3f446754 224 struct sockaddr_storage addr;
3c86aa70 225 struct kref mcref;
c8f6a362
SH
226};
227
e51060f0
SH
228struct cma_work {
229 struct work_struct work;
230 struct rdma_id_private *id;
550e5ca7
NM
231 enum rdma_cm_state old_state;
232 enum rdma_cm_state new_state;
e51060f0
SH
233 struct rdma_cm_event event;
234};
235
dd5bdff8
OG
236struct cma_ndev_work {
237 struct work_struct work;
238 struct rdma_id_private *id;
239 struct rdma_cm_event event;
240};
241
3c86aa70
EC
242struct iboe_mcast_work {
243 struct work_struct work;
244 struct rdma_id_private *id;
245 struct cma_multicast *mc;
246};
247
e51060f0
SH
248union cma_ip_addr {
249 struct in6_addr ip6;
250 struct {
1b90c137
AV
251 __be32 pad[3];
252 __be32 addr;
e51060f0
SH
253 } ip4;
254};
255
256struct cma_hdr {
257 u8 cma_version;
258 u8 ip_version; /* IP version: 7:4 */
1b90c137 259 __be16 port;
e51060f0
SH
260 union cma_ip_addr src_addr;
261 union cma_ip_addr dst_addr;
262};
263
e51060f0 264#define CMA_VERSION 0x00
e51060f0 265
4c21b5bc
HE
266struct cma_req_info {
267 struct ib_device *device;
268 int port;
269 union ib_gid local_gid;
270 __be64 service_id;
271 u16 pkey;
272 bool has_gid:1;
273};
274
550e5ca7 275static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
e51060f0
SH
276{
277 unsigned long flags;
278 int ret;
279
280 spin_lock_irqsave(&id_priv->lock, flags);
281 ret = (id_priv->state == comp);
282 spin_unlock_irqrestore(&id_priv->lock, flags);
283 return ret;
284}
285
286static int cma_comp_exch(struct rdma_id_private *id_priv,
550e5ca7 287 enum rdma_cm_state comp, enum rdma_cm_state exch)
e51060f0
SH
288{
289 unsigned long flags;
290 int ret;
291
292 spin_lock_irqsave(&id_priv->lock, flags);
293 if ((ret = (id_priv->state == comp)))
294 id_priv->state = exch;
295 spin_unlock_irqrestore(&id_priv->lock, flags);
296 return ret;
297}
298
550e5ca7
NM
299static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
300 enum rdma_cm_state exch)
e51060f0
SH
301{
302 unsigned long flags;
550e5ca7 303 enum rdma_cm_state old;
e51060f0
SH
304
305 spin_lock_irqsave(&id_priv->lock, flags);
306 old = id_priv->state;
307 id_priv->state = exch;
308 spin_unlock_irqrestore(&id_priv->lock, flags);
309 return old;
310}
311
4c21b5bc 312static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
e51060f0
SH
313{
314 return hdr->ip_version >> 4;
315}
316
317static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
318{
319 hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
320}
321
e51060f0
SH
322static void cma_attach_to_dev(struct rdma_id_private *id_priv,
323 struct cma_device *cma_dev)
324{
325 atomic_inc(&cma_dev->refcount);
326 id_priv->cma_dev = cma_dev;
327 id_priv->id.device = cma_dev->device;
3c86aa70
EC
328 id_priv->id.route.addr.dev_addr.transport =
329 rdma_node_get_transport(cma_dev->device->node_type);
e51060f0
SH
330 list_add_tail(&id_priv->list, &cma_dev->id_list);
331}
332
333static inline void cma_deref_dev(struct cma_device *cma_dev)
334{
335 if (atomic_dec_and_test(&cma_dev->refcount))
336 complete(&cma_dev->comp);
337}
338
3c86aa70
EC
339static inline void release_mc(struct kref *kref)
340{
341 struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
342
343 kfree(mc->multicast.ib);
344 kfree(mc);
345}
346
a396d43a 347static void cma_release_dev(struct rdma_id_private *id_priv)
e51060f0 348{
a396d43a 349 mutex_lock(&lock);
e51060f0
SH
350 list_del(&id_priv->list);
351 cma_deref_dev(id_priv->cma_dev);
352 id_priv->cma_dev = NULL;
a396d43a 353 mutex_unlock(&lock);
e51060f0
SH
354}
355
f4753834
SH
356static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
357{
358 return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
359}
360
361static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
362{
363 return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
364}
365
366static inline unsigned short cma_family(struct rdma_id_private *id_priv)
367{
368 return id_priv->id.route.addr.src_addr.ss_family;
369}
370
5c438135 371static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
c8f6a362
SH
372{
373 struct ib_sa_mcmember_rec rec;
374 int ret = 0;
375
5c438135
SH
376 if (id_priv->qkey) {
377 if (qkey && id_priv->qkey != qkey)
378 return -EINVAL;
d2ca39f2 379 return 0;
5c438135
SH
380 }
381
382 if (qkey) {
383 id_priv->qkey = qkey;
384 return 0;
385 }
d2ca39f2
YE
386
387 switch (id_priv->id.ps) {
c8f6a362 388 case RDMA_PS_UDP:
5c438135 389 case RDMA_PS_IB:
d2ca39f2 390 id_priv->qkey = RDMA_UDP_QKEY;
c8f6a362
SH
391 break;
392 case RDMA_PS_IPOIB:
d2ca39f2
YE
393 ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
394 ret = ib_sa_get_mcmember_rec(id_priv->id.device,
395 id_priv->id.port_num, &rec.mgid,
396 &rec);
397 if (!ret)
398 id_priv->qkey = be32_to_cpu(rec.qkey);
c8f6a362
SH
399 break;
400 default:
401 break;
402 }
403 return ret;
404}
405
680f920a
SH
406static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
407{
408 dev_addr->dev_type = ARPHRD_INFINIBAND;
409 rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
410 ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
411}
412
413static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
414{
415 int ret;
416
417 if (addr->sa_family != AF_IB) {
dd5f03be 418 ret = rdma_translate_ip(addr, dev_addr, NULL);
680f920a
SH
419 } else {
420 cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
421 ret = 0;
422 }
423
424 return ret;
425}
426
7c11147d
MW
427static inline int cma_validate_port(struct ib_device *device, u8 port,
428 union ib_gid *gid, int dev_type)
429{
430 u8 found_port;
431 int ret = -ENODEV;
432
433 if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
434 return ret;
435
436 if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
437 return ret;
438
439 ret = ib_find_cached_gid(device, gid, &found_port, NULL);
440 if (port != found_port)
441 return -ENODEV;
442
443 return ret;
444}
445
be9130cc
DL
446static int cma_acquire_dev(struct rdma_id_private *id_priv,
447 struct rdma_id_private *listen_id_priv)
e51060f0 448{
c8f6a362 449 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
e51060f0 450 struct cma_device *cma_dev;
7c11147d 451 union ib_gid gid, iboe_gid, *gidp;
e51060f0 452 int ret = -ENODEV;
7c11147d 453 u8 port;
e51060f0 454
7c11147d 455 if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
2efdd6a0
MS
456 id_priv->id.ps == RDMA_PS_IPOIB)
457 return -EINVAL;
458
a396d43a 459 mutex_lock(&lock);
7b85627b
MS
460 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
461 &iboe_gid);
462
3c86aa70
EC
463 memcpy(&gid, dev_addr->src_dev_addr +
464 rdma_addr_gid_offset(dev_addr), sizeof gid);
7c11147d
MW
465
466 if (listen_id_priv) {
be9130cc
DL
467 cma_dev = listen_id_priv->cma_dev;
468 port = listen_id_priv->id.port_num;
5d9fb044 469 gidp = rdma_protocol_roce(cma_dev->device, port) ?
7c11147d 470 &iboe_gid : &gid;
be9130cc 471
7c11147d
MW
472 ret = cma_validate_port(cma_dev->device, port, gidp,
473 dev_addr->dev_type);
474 if (!ret) {
475 id_priv->id.port_num = port;
be9130cc
DL
476 goto out;
477 }
478 }
7c11147d 479
e51060f0 480 list_for_each_entry(cma_dev, &dev_list, list) {
3c86aa70 481 for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
be9130cc
DL
482 if (listen_id_priv &&
483 listen_id_priv->cma_dev == cma_dev &&
484 listen_id_priv->id.port_num == port)
485 continue;
7c11147d 486
5d9fb044 487 gidp = rdma_protocol_roce(cma_dev->device, port) ?
7c11147d
MW
488 &iboe_gid : &gid;
489
490 ret = cma_validate_port(cma_dev->device, port, gidp,
491 dev_addr->dev_type);
492 if (!ret) {
493 id_priv->id.port_num = port;
494 goto out;
3c86aa70 495 }
e51060f0
SH
496 }
497 }
3c86aa70
EC
498
499out:
500 if (!ret)
501 cma_attach_to_dev(id_priv, cma_dev);
502
a396d43a 503 mutex_unlock(&lock);
e51060f0
SH
504 return ret;
505}
506
f17df3b0
SH
507/*
508 * Select the source IB device and address to reach the destination IB address.
509 */
510static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
511{
512 struct cma_device *cma_dev, *cur_dev;
513 struct sockaddr_ib *addr;
514 union ib_gid gid, sgid, *dgid;
515 u16 pkey, index;
8fb488d7 516 u8 p;
f17df3b0
SH
517 int i;
518
519 cma_dev = NULL;
520 addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
521 dgid = (union ib_gid *) &addr->sib_addr;
522 pkey = ntohs(addr->sib_pkey);
523
524 list_for_each_entry(cur_dev, &dev_list, list) {
f17df3b0 525 for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
30a74ef4 526 if (!rdma_cap_af_ib(cur_dev->device, p))
fef60902
MW
527 continue;
528
f17df3b0
SH
529 if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
530 continue;
531
532 for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i, &gid); i++) {
533 if (!memcmp(&gid, dgid, sizeof(gid))) {
534 cma_dev = cur_dev;
535 sgid = gid;
8fb488d7 536 id_priv->id.port_num = p;
f17df3b0
SH
537 goto found;
538 }
539
540 if (!cma_dev && (gid.global.subnet_prefix ==
541 dgid->global.subnet_prefix)) {
542 cma_dev = cur_dev;
543 sgid = gid;
8fb488d7 544 id_priv->id.port_num = p;
f17df3b0
SH
545 }
546 }
547 }
548 }
549
550 if (!cma_dev)
551 return -ENODEV;
552
553found:
554 cma_attach_to_dev(id_priv, cma_dev);
f17df3b0
SH
555 addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
556 memcpy(&addr->sib_addr, &sgid, sizeof sgid);
557 cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
558 return 0;
559}
560
e51060f0
SH
561static void cma_deref_id(struct rdma_id_private *id_priv)
562{
563 if (atomic_dec_and_test(&id_priv->refcount))
564 complete(&id_priv->comp);
565}
566
de910bd9 567static int cma_disable_callback(struct rdma_id_private *id_priv,
550e5ca7 568 enum rdma_cm_state state)
8aa08602 569{
de910bd9
OG
570 mutex_lock(&id_priv->handler_mutex);
571 if (id_priv->state != state) {
572 mutex_unlock(&id_priv->handler_mutex);
573 return -EINVAL;
574 }
575 return 0;
e51060f0
SH
576}
577
578struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
b26f9b99
SH
579 void *context, enum rdma_port_space ps,
580 enum ib_qp_type qp_type)
e51060f0
SH
581{
582 struct rdma_id_private *id_priv;
583
584 id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
585 if (!id_priv)
586 return ERR_PTR(-ENOMEM);
587
83e9502d 588 id_priv->owner = task_pid_nr(current);
550e5ca7 589 id_priv->state = RDMA_CM_IDLE;
e51060f0
SH
590 id_priv->id.context = context;
591 id_priv->id.event_handler = event_handler;
592 id_priv->id.ps = ps;
b26f9b99 593 id_priv->id.qp_type = qp_type;
e51060f0 594 spin_lock_init(&id_priv->lock);
c5483388 595 mutex_init(&id_priv->qp_mutex);
e51060f0
SH
596 init_completion(&id_priv->comp);
597 atomic_set(&id_priv->refcount, 1);
de910bd9 598 mutex_init(&id_priv->handler_mutex);
e51060f0 599 INIT_LIST_HEAD(&id_priv->listen_list);
c8f6a362 600 INIT_LIST_HEAD(&id_priv->mc_list);
e51060f0
SH
601 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
602
603 return &id_priv->id;
604}
605EXPORT_SYMBOL(rdma_create_id);
606
c8f6a362 607static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
e51060f0
SH
608{
609 struct ib_qp_attr qp_attr;
c8f6a362 610 int qp_attr_mask, ret;
e51060f0 611
c8f6a362
SH
612 qp_attr.qp_state = IB_QPS_INIT;
613 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
e51060f0
SH
614 if (ret)
615 return ret;
616
c8f6a362
SH
617 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
618 if (ret)
619 return ret;
620
621 qp_attr.qp_state = IB_QPS_RTR;
622 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
623 if (ret)
624 return ret;
625
626 qp_attr.qp_state = IB_QPS_RTS;
627 qp_attr.sq_psn = 0;
628 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
629
630 return ret;
e51060f0
SH
631}
632
c8f6a362 633static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
07ebafba
TT
634{
635 struct ib_qp_attr qp_attr;
c8f6a362 636 int qp_attr_mask, ret;
07ebafba
TT
637
638 qp_attr.qp_state = IB_QPS_INIT;
c8f6a362
SH
639 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
640 if (ret)
641 return ret;
07ebafba 642
c8f6a362 643 return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
07ebafba
TT
644}
645
e51060f0
SH
646int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
647 struct ib_qp_init_attr *qp_init_attr)
648{
649 struct rdma_id_private *id_priv;
650 struct ib_qp *qp;
651 int ret;
652
653 id_priv = container_of(id, struct rdma_id_private, id);
654 if (id->device != pd->device)
655 return -EINVAL;
656
657 qp = ib_create_qp(pd, qp_init_attr);
658 if (IS_ERR(qp))
659 return PTR_ERR(qp);
660
b26f9b99 661 if (id->qp_type == IB_QPT_UD)
c8f6a362
SH
662 ret = cma_init_ud_qp(id_priv, qp);
663 else
664 ret = cma_init_conn_qp(id_priv, qp);
e51060f0
SH
665 if (ret)
666 goto err;
667
668 id->qp = qp;
669 id_priv->qp_num = qp->qp_num;
e51060f0
SH
670 id_priv->srq = (qp->srq != NULL);
671 return 0;
672err:
673 ib_destroy_qp(qp);
674 return ret;
675}
676EXPORT_SYMBOL(rdma_create_qp);
677
678void rdma_destroy_qp(struct rdma_cm_id *id)
679{
c5483388
SH
680 struct rdma_id_private *id_priv;
681
682 id_priv = container_of(id, struct rdma_id_private, id);
683 mutex_lock(&id_priv->qp_mutex);
684 ib_destroy_qp(id_priv->id.qp);
685 id_priv->id.qp = NULL;
686 mutex_unlock(&id_priv->qp_mutex);
e51060f0
SH
687}
688EXPORT_SYMBOL(rdma_destroy_qp);
689
5851bb89
SH
690static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
691 struct rdma_conn_param *conn_param)
e51060f0
SH
692{
693 struct ib_qp_attr qp_attr;
694 int qp_attr_mask, ret;
dd5f03be 695 union ib_gid sgid;
e51060f0 696
c5483388
SH
697 mutex_lock(&id_priv->qp_mutex);
698 if (!id_priv->id.qp) {
699 ret = 0;
700 goto out;
701 }
e51060f0
SH
702
703 /* Need to update QP attributes from default values. */
704 qp_attr.qp_state = IB_QPS_INIT;
c5483388 705 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
e51060f0 706 if (ret)
c5483388 707 goto out;
e51060f0 708
c5483388 709 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
e51060f0 710 if (ret)
c5483388 711 goto out;
e51060f0
SH
712
713 qp_attr.qp_state = IB_QPS_RTR;
c5483388 714 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
e51060f0 715 if (ret)
c5483388 716 goto out;
e51060f0 717
dd5f03be
MB
718 ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
719 qp_attr.ah_attr.grh.sgid_index, &sgid);
720 if (ret)
721 goto out;
722
fef60902
MW
723 BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
724
5d9fb044 725 if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
dd5f03be
MB
726 ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
727
728 if (ret)
729 goto out;
730 }
5851bb89
SH
731 if (conn_param)
732 qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
c5483388
SH
733 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
734out:
735 mutex_unlock(&id_priv->qp_mutex);
736 return ret;
e51060f0
SH
737}
738
5851bb89
SH
739static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
740 struct rdma_conn_param *conn_param)
e51060f0
SH
741{
742 struct ib_qp_attr qp_attr;
743 int qp_attr_mask, ret;
744
c5483388
SH
745 mutex_lock(&id_priv->qp_mutex);
746 if (!id_priv->id.qp) {
747 ret = 0;
748 goto out;
749 }
e51060f0
SH
750
751 qp_attr.qp_state = IB_QPS_RTS;
c5483388 752 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
e51060f0 753 if (ret)
c5483388 754 goto out;
e51060f0 755
5851bb89
SH
756 if (conn_param)
757 qp_attr.max_rd_atomic = conn_param->initiator_depth;
c5483388
SH
758 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
759out:
760 mutex_unlock(&id_priv->qp_mutex);
761 return ret;
e51060f0
SH
762}
763
c5483388 764static int cma_modify_qp_err(struct rdma_id_private *id_priv)
e51060f0
SH
765{
766 struct ib_qp_attr qp_attr;
c5483388 767 int ret;
e51060f0 768
c5483388
SH
769 mutex_lock(&id_priv->qp_mutex);
770 if (!id_priv->id.qp) {
771 ret = 0;
772 goto out;
773 }
e51060f0
SH
774
775 qp_attr.qp_state = IB_QPS_ERR;
c5483388
SH
776 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
777out:
778 mutex_unlock(&id_priv->qp_mutex);
779 return ret;
e51060f0
SH
780}
781
c8f6a362
SH
782static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
783 struct ib_qp_attr *qp_attr, int *qp_attr_mask)
784{
785 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
786 int ret;
3c86aa70
EC
787 u16 pkey;
788
227128fc 789 if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
3c86aa70 790 pkey = 0xffff;
fef60902
MW
791 else
792 pkey = ib_addr_get_pkey(dev_addr);
c8f6a362
SH
793
794 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
3c86aa70 795 pkey, &qp_attr->pkey_index);
c8f6a362
SH
796 if (ret)
797 return ret;
798
799 qp_attr->port_num = id_priv->id.port_num;
800 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
801
b26f9b99 802 if (id_priv->id.qp_type == IB_QPT_UD) {
5c438135 803 ret = cma_set_qkey(id_priv, 0);
d2ca39f2
YE
804 if (ret)
805 return ret;
806
c8f6a362
SH
807 qp_attr->qkey = id_priv->qkey;
808 *qp_attr_mask |= IB_QP_QKEY;
809 } else {
810 qp_attr->qp_access_flags = 0;
811 *qp_attr_mask |= IB_QP_ACCESS_FLAGS;
812 }
813 return 0;
814}
815
e51060f0
SH
816int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
817 int *qp_attr_mask)
818{
819 struct rdma_id_private *id_priv;
c8f6a362 820 int ret = 0;
e51060f0
SH
821
822 id_priv = container_of(id, struct rdma_id_private, id);
72219cea 823 if (rdma_cap_ib_cm(id->device, id->port_num)) {
b26f9b99 824 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
c8f6a362
SH
825 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
826 else
827 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
828 qp_attr_mask);
dd5f03be 829
e51060f0
SH
830 if (qp_attr->qp_state == IB_QPS_RTR)
831 qp_attr->rq_psn = id_priv->seq_num;
04215330 832 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
c8f6a362 833 if (!id_priv->cm_id.iw) {
8f076531 834 qp_attr->qp_access_flags = 0;
c8f6a362
SH
835 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
836 } else
837 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
838 qp_attr_mask);
21655afc 839 } else
e51060f0 840 ret = -ENOSYS;
e51060f0
SH
841
842 return ret;
843}
844EXPORT_SYMBOL(rdma_init_qp_attr);
845
846static inline int cma_zero_addr(struct sockaddr *addr)
847{
2e2d190c
SH
848 switch (addr->sa_family) {
849 case AF_INET:
850 return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
851 case AF_INET6:
852 return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
853 case AF_IB:
854 return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
855 default:
856 return 0;
e51060f0
SH
857 }
858}
859
860static inline int cma_loopback_addr(struct sockaddr *addr)
861{
2e2d190c
SH
862 switch (addr->sa_family) {
863 case AF_INET:
864 return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
865 case AF_INET6:
866 return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
867 case AF_IB:
868 return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
869 default:
870 return 0;
871 }
e51060f0
SH
872}
873
874static inline int cma_any_addr(struct sockaddr *addr)
875{
876 return cma_zero_addr(addr) || cma_loopback_addr(addr);
877}
878
43b752da
HS
879static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
880{
881 if (src->sa_family != dst->sa_family)
882 return -1;
883
884 switch (src->sa_family) {
885 case AF_INET:
886 return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
887 ((struct sockaddr_in *) dst)->sin_addr.s_addr;
2e2d190c 888 case AF_INET6:
43b752da
HS
889 return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
890 &((struct sockaddr_in6 *) dst)->sin6_addr);
2e2d190c
SH
891 default:
892 return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
893 &((struct sockaddr_ib *) dst)->sib_addr);
43b752da
HS
894 }
895}
896
58afdcb7 897static __be16 cma_port(struct sockaddr *addr)
628e5f6d 898{
58afdcb7
SH
899 struct sockaddr_ib *sib;
900
901 switch (addr->sa_family) {
902 case AF_INET:
628e5f6d 903 return ((struct sockaddr_in *) addr)->sin_port;
58afdcb7 904 case AF_INET6:
628e5f6d 905 return ((struct sockaddr_in6 *) addr)->sin6_port;
58afdcb7
SH
906 case AF_IB:
907 sib = (struct sockaddr_ib *) addr;
908 return htons((u16) (be64_to_cpu(sib->sib_sid) &
909 be64_to_cpu(sib->sib_sid_mask)));
910 default:
911 return 0;
912 }
628e5f6d
SH
913}
914
e51060f0
SH
915static inline int cma_any_port(struct sockaddr *addr)
916{
628e5f6d 917 return !cma_port(addr);
e51060f0
SH
918}
919
0c505f70
HE
920static void cma_save_ib_info(struct sockaddr *src_addr,
921 struct sockaddr *dst_addr,
922 struct rdma_cm_id *listen_id,
fbaa1a6d 923 struct ib_sa_path_rec *path)
e51060f0 924{
fbaa1a6d
SH
925 struct sockaddr_ib *listen_ib, *ib;
926
927 listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
0c505f70
HE
928 if (src_addr) {
929 ib = (struct sockaddr_ib *)src_addr;
930 ib->sib_family = AF_IB;
931 if (path) {
932 ib->sib_pkey = path->pkey;
933 ib->sib_flowinfo = path->flow_label;
934 memcpy(&ib->sib_addr, &path->sgid, 16);
935 ib->sib_sid = path->service_id;
936 ib->sib_scope_id = 0;
937 } else {
938 ib->sib_pkey = listen_ib->sib_pkey;
939 ib->sib_flowinfo = listen_ib->sib_flowinfo;
940 ib->sib_addr = listen_ib->sib_addr;
941 ib->sib_sid = listen_ib->sib_sid;
942 ib->sib_scope_id = listen_ib->sib_scope_id;
943 }
944 ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
c07678bb 945 }
0c505f70
HE
946 if (dst_addr) {
947 ib = (struct sockaddr_ib *)dst_addr;
948 ib->sib_family = AF_IB;
949 if (path) {
950 ib->sib_pkey = path->pkey;
951 ib->sib_flowinfo = path->flow_label;
952 memcpy(&ib->sib_addr, &path->dgid, 16);
953 }
c07678bb 954 }
fbaa1a6d 955}
e51060f0 956
0c505f70
HE
957static void cma_save_ip4_info(struct sockaddr *src_addr,
958 struct sockaddr *dst_addr,
959 struct cma_hdr *hdr,
960 __be16 local_port)
fbaa1a6d 961{
28521440 962 struct sockaddr_in *ip4;
e51060f0 963
0c505f70
HE
964 if (src_addr) {
965 ip4 = (struct sockaddr_in *)src_addr;
966 ip4->sin_family = AF_INET;
967 ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
968 ip4->sin_port = local_port;
969 }
fbaa1a6d 970
0c505f70
HE
971 if (dst_addr) {
972 ip4 = (struct sockaddr_in *)dst_addr;
973 ip4->sin_family = AF_INET;
974 ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
975 ip4->sin_port = hdr->port;
976 }
e51060f0
SH
977}
978
0c505f70
HE
979static void cma_save_ip6_info(struct sockaddr *src_addr,
980 struct sockaddr *dst_addr,
981 struct cma_hdr *hdr,
982 __be16 local_port)
e51060f0 983{
28521440 984 struct sockaddr_in6 *ip6;
e51060f0 985
0c505f70
HE
986 if (src_addr) {
987 ip6 = (struct sockaddr_in6 *)src_addr;
988 ip6->sin6_family = AF_INET6;
989 ip6->sin6_addr = hdr->dst_addr.ip6;
990 ip6->sin6_port = local_port;
991 }
fbaa1a6d 992
0c505f70
HE
993 if (dst_addr) {
994 ip6 = (struct sockaddr_in6 *)dst_addr;
995 ip6->sin6_family = AF_INET6;
996 ip6->sin6_addr = hdr->src_addr.ip6;
997 ip6->sin6_port = hdr->port;
998 }
fbaa1a6d
SH
999}
1000
0c505f70 1001static u16 cma_port_from_service_id(__be64 service_id)
fbaa1a6d 1002{
0c505f70
HE
1003 return (u16)be64_to_cpu(service_id);
1004}
fbaa1a6d 1005
0c505f70
HE
1006static int cma_save_ip_info(struct sockaddr *src_addr,
1007 struct sockaddr *dst_addr,
1008 struct ib_cm_event *ib_event,
1009 __be64 service_id)
1010{
1011 struct cma_hdr *hdr;
1012 __be16 port;
fbaa1a6d
SH
1013
1014 hdr = ib_event->private_data;
1015 if (hdr->cma_version != CMA_VERSION)
1016 return -EINVAL;
1017
0c505f70
HE
1018 port = htons(cma_port_from_service_id(service_id));
1019
fbaa1a6d 1020 switch (cma_get_ip_ver(hdr)) {
e51060f0 1021 case 4:
0c505f70 1022 cma_save_ip4_info(src_addr, dst_addr, hdr, port);
e51060f0
SH
1023 break;
1024 case 6:
0c505f70 1025 cma_save_ip6_info(src_addr, dst_addr, hdr, port);
e51060f0
SH
1026 break;
1027 default:
4c21b5bc 1028 return -EAFNOSUPPORT;
e51060f0 1029 }
0c505f70 1030
fbaa1a6d 1031 return 0;
e51060f0
SH
1032}
1033
0c505f70
HE
1034static int cma_save_net_info(struct sockaddr *src_addr,
1035 struct sockaddr *dst_addr,
1036 struct rdma_cm_id *listen_id,
1037 struct ib_cm_event *ib_event,
1038 sa_family_t sa_family, __be64 service_id)
1039{
1040 if (sa_family == AF_IB) {
1041 if (ib_event->event == IB_CM_REQ_RECEIVED)
1042 cma_save_ib_info(src_addr, dst_addr, listen_id,
1043 ib_event->param.req_rcvd.primary_path);
1044 else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
1045 cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
1046 return 0;
1047 }
1048
1049 return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
1050}
1051
4c21b5bc
HE
1052static int cma_save_req_info(const struct ib_cm_event *ib_event,
1053 struct cma_req_info *req)
1054{
1055 const struct ib_cm_req_event_param *req_param =
1056 &ib_event->param.req_rcvd;
1057 const struct ib_cm_sidr_req_event_param *sidr_param =
1058 &ib_event->param.sidr_req_rcvd;
1059
1060 switch (ib_event->event) {
1061 case IB_CM_REQ_RECEIVED:
1062 req->device = req_param->listen_id->device;
1063 req->port = req_param->port;
1064 memcpy(&req->local_gid, &req_param->primary_path->sgid,
1065 sizeof(req->local_gid));
1066 req->has_gid = true;
1067 req->service_id = req_param->primary_path->service_id;
1068 req->pkey = req_param->bth_pkey;
1069 break;
1070 case IB_CM_SIDR_REQ_RECEIVED:
1071 req->device = sidr_param->listen_id->device;
1072 req->port = sidr_param->port;
1073 req->has_gid = false;
1074 req->service_id = sidr_param->service_id;
1075 req->pkey = sidr_param->bth_pkey;
1076 break;
1077 default:
1078 return -EINVAL;
1079 }
1080
1081 return 0;
1082}
1083
1084static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
1085 const struct cma_req_info *req)
1086{
1087 struct sockaddr_storage listen_addr_storage;
1088 struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage;
1089 struct net_device *net_dev;
1090 const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
1091 int err;
1092
1093 err = cma_save_ip_info(listen_addr, NULL, ib_event, req->service_id);
1094 if (err)
1095 return ERR_PTR(err);
1096
1097 net_dev = ib_get_net_dev_by_params(req->device, req->port, req->pkey,
1098 gid, listen_addr);
1099 if (!net_dev)
1100 return ERR_PTR(-ENODEV);
1101
1102 return net_dev;
1103}
1104
1105static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id)
1106{
1107 return (be64_to_cpu(service_id) >> 16) & 0xffff;
1108}
1109
1110static bool cma_match_private_data(struct rdma_id_private *id_priv,
1111 const struct cma_hdr *hdr)
1112{
1113 struct sockaddr *addr = cma_src_addr(id_priv);
1114 __be32 ip4_addr;
1115 struct in6_addr ip6_addr;
1116
1117 if (cma_any_addr(addr) && !id_priv->afonly)
1118 return true;
1119
1120 switch (addr->sa_family) {
1121 case AF_INET:
1122 ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
1123 if (cma_get_ip_ver(hdr) != 4)
1124 return false;
1125 if (!cma_any_addr(addr) &&
1126 hdr->dst_addr.ip4.addr != ip4_addr)
1127 return false;
1128 break;
1129 case AF_INET6:
1130 ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
1131 if (cma_get_ip_ver(hdr) != 6)
1132 return false;
1133 if (!cma_any_addr(addr) &&
1134 memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
1135 return false;
1136 break;
1137 case AF_IB:
1138 return true;
1139 default:
1140 return false;
1141 }
1142
1143 return true;
1144}
1145
1146static bool cma_match_net_dev(const struct rdma_id_private *id_priv,
1147 const struct net_device *net_dev)
1148{
1149 const struct rdma_addr *addr = &id_priv->id.route.addr;
1150
1151 if (!net_dev)
1152 /* This request is an AF_IB request */
1153 return addr->src_addr.ss_family == AF_IB;
1154
1155 return !addr->dev_addr.bound_dev_if ||
1156 (net_eq(dev_net(net_dev), &init_net) &&
1157 addr->dev_addr.bound_dev_if == net_dev->ifindex);
1158}
1159
1160static struct rdma_id_private *cma_find_listener(
1161 const struct rdma_bind_list *bind_list,
1162 const struct ib_cm_id *cm_id,
1163 const struct ib_cm_event *ib_event,
1164 const struct cma_req_info *req,
1165 const struct net_device *net_dev)
1166{
1167 struct rdma_id_private *id_priv, *id_priv_dev;
1168
1169 if (!bind_list)
1170 return ERR_PTR(-EINVAL);
1171
1172 hlist_for_each_entry(id_priv, &bind_list->owners, node) {
1173 if (cma_match_private_data(id_priv, ib_event->private_data)) {
1174 if (id_priv->id.device == cm_id->device &&
1175 cma_match_net_dev(id_priv, net_dev))
1176 return id_priv;
1177 list_for_each_entry(id_priv_dev,
1178 &id_priv->listen_list,
1179 listen_list) {
1180 if (id_priv_dev->id.device == cm_id->device &&
1181 cma_match_net_dev(id_priv_dev, net_dev))
1182 return id_priv_dev;
1183 }
1184 }
1185 }
1186
1187 return ERR_PTR(-EINVAL);
1188}
1189
1190static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
1191 struct ib_cm_event *ib_event)
1192{
1193 struct cma_req_info req;
1194 struct rdma_bind_list *bind_list;
1195 struct rdma_id_private *id_priv;
1196 struct net_device *net_dev;
1197 int err;
1198
1199 err = cma_save_req_info(ib_event, &req);
1200 if (err)
1201 return ERR_PTR(err);
1202
1203 net_dev = cma_get_net_dev(ib_event, &req);
1204 if (IS_ERR(net_dev)) {
1205 if (PTR_ERR(net_dev) == -EAFNOSUPPORT) {
1206 /* Assuming the protocol is AF_IB */
1207 net_dev = NULL;
1208 } else {
1209 return ERR_CAST(net_dev);
1210 }
1211 }
1212
1213 bind_list = cma_ps_find(rdma_ps_from_service_id(req.service_id),
1214 cma_port_from_service_id(req.service_id));
1215 id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, net_dev);
1216
1217 dev_put(net_dev);
1218
1219 return id_priv;
1220}
1221
e8160e15 1222static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
e51060f0 1223{
e8160e15 1224 return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
e51060f0
SH
1225}
1226
e51060f0
SH
1227static void cma_cancel_route(struct rdma_id_private *id_priv)
1228{
fe53ba2f 1229 if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
e51060f0
SH
1230 if (id_priv->query)
1231 ib_sa_cancel_query(id_priv->query_id, id_priv->query);
e51060f0
SH
1232 }
1233}
1234
e51060f0
SH
1235static void cma_cancel_listens(struct rdma_id_private *id_priv)
1236{
1237 struct rdma_id_private *dev_id_priv;
1238
d02d1f53
SH
1239 /*
1240 * Remove from listen_any_list to prevent added devices from spawning
1241 * additional listen requests.
1242 */
e51060f0
SH
1243 mutex_lock(&lock);
1244 list_del(&id_priv->list);
1245
1246 while (!list_empty(&id_priv->listen_list)) {
1247 dev_id_priv = list_entry(id_priv->listen_list.next,
1248 struct rdma_id_private, listen_list);
d02d1f53
SH
1249 /* sync with device removal to avoid duplicate destruction */
1250 list_del_init(&dev_id_priv->list);
1251 list_del(&dev_id_priv->listen_list);
1252 mutex_unlock(&lock);
1253
1254 rdma_destroy_id(&dev_id_priv->id);
1255 mutex_lock(&lock);
e51060f0
SH
1256 }
1257 mutex_unlock(&lock);
1258}
1259
1260static void cma_cancel_operation(struct rdma_id_private *id_priv,
550e5ca7 1261 enum rdma_cm_state state)
e51060f0
SH
1262{
1263 switch (state) {
550e5ca7 1264 case RDMA_CM_ADDR_QUERY:
e51060f0
SH
1265 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
1266 break;
550e5ca7 1267 case RDMA_CM_ROUTE_QUERY:
e51060f0
SH
1268 cma_cancel_route(id_priv);
1269 break;
550e5ca7 1270 case RDMA_CM_LISTEN:
f4753834 1271 if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
e51060f0
SH
1272 cma_cancel_listens(id_priv);
1273 break;
1274 default:
1275 break;
1276 }
1277}
1278
1279static void cma_release_port(struct rdma_id_private *id_priv)
1280{
1281 struct rdma_bind_list *bind_list = id_priv->bind_list;
1282
1283 if (!bind_list)
1284 return;
1285
1286 mutex_lock(&lock);
1287 hlist_del(&id_priv->node);
1288 if (hlist_empty(&bind_list->owners)) {
aac978e1 1289 cma_ps_remove(bind_list->ps, bind_list->port);
e51060f0
SH
1290 kfree(bind_list);
1291 }
1292 mutex_unlock(&lock);
1293}
1294
c8f6a362
SH
1295static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
1296{
1297 struct cma_multicast *mc;
1298
1299 while (!list_empty(&id_priv->mc_list)) {
1300 mc = container_of(id_priv->mc_list.next,
1301 struct cma_multicast, list);
1302 list_del(&mc->list);
a31ad3b0 1303 if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
5c9a5282 1304 id_priv->id.port_num)) {
3c86aa70
EC
1305 ib_sa_free_multicast(mc->multicast.ib);
1306 kfree(mc);
5c9a5282 1307 } else
3c86aa70 1308 kref_put(&mc->mcref, release_mc);
c8f6a362
SH
1309 }
1310}
1311
e51060f0
SH
1312void rdma_destroy_id(struct rdma_cm_id *id)
1313{
1314 struct rdma_id_private *id_priv;
550e5ca7 1315 enum rdma_cm_state state;
e51060f0
SH
1316
1317 id_priv = container_of(id, struct rdma_id_private, id);
550e5ca7 1318 state = cma_exch(id_priv, RDMA_CM_DESTROYING);
e51060f0
SH
1319 cma_cancel_operation(id_priv, state);
1320
a396d43a
SH
1321 /*
1322 * Wait for any active callback to finish. New callbacks will find
1323 * the id_priv state set to destroying and abort.
1324 */
1325 mutex_lock(&id_priv->handler_mutex);
1326 mutex_unlock(&id_priv->handler_mutex);
1327
e51060f0 1328 if (id_priv->cma_dev) {
72219cea 1329 if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
0c9361fc 1330 if (id_priv->cm_id.ib)
e51060f0 1331 ib_destroy_cm_id(id_priv->cm_id.ib);
04215330 1332 } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
0c9361fc 1333 if (id_priv->cm_id.iw)
07ebafba 1334 iw_destroy_cm_id(id_priv->cm_id.iw);
e51060f0 1335 }
c8f6a362 1336 cma_leave_mc_groups(id_priv);
a396d43a 1337 cma_release_dev(id_priv);
e51060f0
SH
1338 }
1339
1340 cma_release_port(id_priv);
1341 cma_deref_id(id_priv);
1342 wait_for_completion(&id_priv->comp);
1343
d02d1f53
SH
1344 if (id_priv->internal_id)
1345 cma_deref_id(id_priv->id.context);
1346
e51060f0
SH
1347 kfree(id_priv->id.route.path_rec);
1348 kfree(id_priv);
1349}
1350EXPORT_SYMBOL(rdma_destroy_id);
1351
1352static int cma_rep_recv(struct rdma_id_private *id_priv)
1353{
1354 int ret;
1355
5851bb89 1356 ret = cma_modify_qp_rtr(id_priv, NULL);
e51060f0
SH
1357 if (ret)
1358 goto reject;
1359
5851bb89 1360 ret = cma_modify_qp_rts(id_priv, NULL);
e51060f0
SH
1361 if (ret)
1362 goto reject;
1363
1364 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
1365 if (ret)
1366 goto reject;
1367
1368 return 0;
1369reject:
c5483388 1370 cma_modify_qp_err(id_priv);
e51060f0
SH
1371 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
1372 NULL, 0, NULL, 0);
1373 return ret;
1374}
1375
a1b1b61f
SH
1376static void cma_set_rep_event_data(struct rdma_cm_event *event,
1377 struct ib_cm_rep_event_param *rep_data,
1378 void *private_data)
1379{
1380 event->param.conn.private_data = private_data;
1381 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
1382 event->param.conn.responder_resources = rep_data->responder_resources;
1383 event->param.conn.initiator_depth = rep_data->initiator_depth;
1384 event->param.conn.flow_control = rep_data->flow_control;
1385 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
1386 event->param.conn.srq = rep_data->srq;
1387 event->param.conn.qp_num = rep_data->remote_qpn;
1388}
1389
e51060f0
SH
1390static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1391{
1392 struct rdma_id_private *id_priv = cm_id->context;
a1b1b61f
SH
1393 struct rdma_cm_event event;
1394 int ret = 0;
e51060f0 1395
38ca83a5 1396 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
550e5ca7 1397 cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
38ca83a5 1398 (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
550e5ca7 1399 cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
8aa08602 1400 return 0;
e51060f0 1401
a1b1b61f 1402 memset(&event, 0, sizeof event);
e51060f0
SH
1403 switch (ib_event->event) {
1404 case IB_CM_REQ_ERROR:
1405 case IB_CM_REP_ERROR:
a1b1b61f
SH
1406 event.event = RDMA_CM_EVENT_UNREACHABLE;
1407 event.status = -ETIMEDOUT;
e51060f0
SH
1408 break;
1409 case IB_CM_REP_RECEIVED:
01602f11 1410 if (id_priv->id.qp) {
a1b1b61f
SH
1411 event.status = cma_rep_recv(id_priv);
1412 event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
1413 RDMA_CM_EVENT_ESTABLISHED;
01602f11 1414 } else {
a1b1b61f 1415 event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
01602f11 1416 }
a1b1b61f
SH
1417 cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
1418 ib_event->private_data);
e51060f0
SH
1419 break;
1420 case IB_CM_RTU_RECEIVED:
0fe313b0
SH
1421 case IB_CM_USER_ESTABLISHED:
1422 event.event = RDMA_CM_EVENT_ESTABLISHED;
e51060f0
SH
1423 break;
1424 case IB_CM_DREQ_ERROR:
a1b1b61f 1425 event.status = -ETIMEDOUT; /* fall through */
e51060f0
SH
1426 case IB_CM_DREQ_RECEIVED:
1427 case IB_CM_DREP_RECEIVED:
550e5ca7
NM
1428 if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
1429 RDMA_CM_DISCONNECT))
e51060f0 1430 goto out;
a1b1b61f 1431 event.event = RDMA_CM_EVENT_DISCONNECTED;
e51060f0
SH
1432 break;
1433 case IB_CM_TIMEWAIT_EXIT:
38ca83a5
AV
1434 event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
1435 break;
e51060f0
SH
1436 case IB_CM_MRA_RECEIVED:
1437 /* ignore event */
1438 goto out;
1439 case IB_CM_REJ_RECEIVED:
c5483388 1440 cma_modify_qp_err(id_priv);
a1b1b61f
SH
1441 event.status = ib_event->param.rej_rcvd.reason;
1442 event.event = RDMA_CM_EVENT_REJECTED;
1443 event.param.conn.private_data = ib_event->private_data;
1444 event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
e51060f0
SH
1445 break;
1446 default:
468f2239 1447 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
e51060f0
SH
1448 ib_event->event);
1449 goto out;
1450 }
1451
a1b1b61f 1452 ret = id_priv->id.event_handler(&id_priv->id, &event);
e51060f0
SH
1453 if (ret) {
1454 /* Destroy the CM ID by returning a non-zero value. */
1455 id_priv->cm_id.ib = NULL;
550e5ca7 1456 cma_exch(id_priv, RDMA_CM_DESTROYING);
de910bd9 1457 mutex_unlock(&id_priv->handler_mutex);
e51060f0
SH
1458 rdma_destroy_id(&id_priv->id);
1459 return ret;
1460 }
1461out:
de910bd9 1462 mutex_unlock(&id_priv->handler_mutex);
e51060f0
SH
1463 return ret;
1464}
1465
628e5f6d
SH
1466static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
1467 struct ib_cm_event *ib_event)
e51060f0
SH
1468{
1469 struct rdma_id_private *id_priv;
1470 struct rdma_cm_id *id;
1471 struct rdma_route *rt;
0c505f70
HE
1472 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
1473 const __be64 service_id =
1474 ib_event->param.req_rcvd.primary_path->service_id;
64c5e613 1475 int ret;
e51060f0
SH
1476
1477 id = rdma_create_id(listen_id->event_handler, listen_id->context,
b26f9b99 1478 listen_id->ps, ib_event->param.req_rcvd.qp_type);
e51060f0 1479 if (IS_ERR(id))
0c9361fc 1480 return NULL;
3f168d2b 1481
f4753834 1482 id_priv = container_of(id, struct rdma_id_private, id);
0c505f70
HE
1483 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
1484 (struct sockaddr *)&id->route.addr.dst_addr,
1485 listen_id, ib_event, ss_family, service_id))
fbaa1a6d 1486 goto err;
e51060f0
SH
1487
1488 rt = &id->route;
1489 rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
3f168d2b
KK
1490 rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
1491 GFP_KERNEL);
e51060f0 1492 if (!rt->path_rec)
0c9361fc 1493 goto err;
e51060f0 1494
e51060f0
SH
1495 rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
1496 if (rt->num_paths == 2)
1497 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
1498
f4753834 1499 if (cma_any_addr(cma_src_addr(id_priv))) {
6f8372b6
SH
1500 rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
1501 rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
46ea5061 1502 ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
6f8372b6 1503 } else {
f4753834 1504 ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
6f8372b6 1505 if (ret)
0c9361fc 1506 goto err;
6f8372b6
SH
1507 }
1508 rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
e51060f0 1509
550e5ca7 1510 id_priv->state = RDMA_CM_CONNECT;
e51060f0 1511 return id_priv;
3f168d2b 1512
3f168d2b 1513err:
0c9361fc 1514 rdma_destroy_id(id);
e51060f0
SH
1515 return NULL;
1516}
1517
628e5f6d
SH
1518static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
1519 struct ib_cm_event *ib_event)
1520{
1521 struct rdma_id_private *id_priv;
1522 struct rdma_cm_id *id;
0c505f70 1523 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
628e5f6d
SH
1524 int ret;
1525
1526 id = rdma_create_id(listen_id->event_handler, listen_id->context,
b26f9b99 1527 listen_id->ps, IB_QPT_UD);
628e5f6d
SH
1528 if (IS_ERR(id))
1529 return NULL;
1530
f4753834 1531 id_priv = container_of(id, struct rdma_id_private, id);
0c505f70
HE
1532 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
1533 (struct sockaddr *)&id->route.addr.dst_addr,
1534 listen_id, ib_event, ss_family,
1535 ib_event->param.sidr_req_rcvd.service_id))
628e5f6d
SH
1536 goto err;
1537
6f8372b6 1538 if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
f4753834 1539 ret = cma_translate_addr(cma_src_addr(id_priv), &id->route.addr.dev_addr);
6f8372b6
SH
1540 if (ret)
1541 goto err;
1542 }
628e5f6d 1543
550e5ca7 1544 id_priv->state = RDMA_CM_CONNECT;
628e5f6d
SH
1545 return id_priv;
1546err:
1547 rdma_destroy_id(id);
1548 return NULL;
1549}
1550
a1b1b61f
SH
1551static void cma_set_req_event_data(struct rdma_cm_event *event,
1552 struct ib_cm_req_event_param *req_data,
1553 void *private_data, int offset)
1554{
1555 event->param.conn.private_data = private_data + offset;
1556 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
1557 event->param.conn.responder_resources = req_data->responder_resources;
1558 event->param.conn.initiator_depth = req_data->initiator_depth;
1559 event->param.conn.flow_control = req_data->flow_control;
1560 event->param.conn.retry_count = req_data->retry_count;
1561 event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
1562 event->param.conn.srq = req_data->srq;
1563 event->param.conn.qp_num = req_data->remote_qpn;
1564}
1565
9595480c
HS
1566static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
1567{
4dd81e89 1568 return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
9595480c
HS
1569 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
1570 ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
1571 (id->qp_type == IB_QPT_UD)) ||
1572 (!id->qp_type));
1573}
1574
e51060f0
SH
1575static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1576{
1577 struct rdma_id_private *listen_id, *conn_id;
a1b1b61f 1578 struct rdma_cm_event event;
e51060f0
SH
1579 int offset, ret;
1580
4c21b5bc
HE
1581 listen_id = cma_id_from_event(cm_id, ib_event);
1582 if (IS_ERR(listen_id))
1583 return PTR_ERR(listen_id);
1584
9595480c
HS
1585 if (!cma_check_req_qp_type(&listen_id->id, ib_event))
1586 return -EINVAL;
1587
550e5ca7 1588 if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
8aa08602 1589 return -ECONNABORTED;
e51060f0 1590
628e5f6d 1591 memset(&event, 0, sizeof event);
e8160e15 1592 offset = cma_user_data_offset(listen_id);
628e5f6d 1593 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
9595480c 1594 if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
628e5f6d
SH
1595 conn_id = cma_new_udp_id(&listen_id->id, ib_event);
1596 event.param.ud.private_data = ib_event->private_data + offset;
1597 event.param.ud.private_data_len =
1598 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
1599 } else {
1600 conn_id = cma_new_conn_id(&listen_id->id, ib_event);
1601 cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
1602 ib_event->private_data, offset);
1603 }
e51060f0
SH
1604 if (!conn_id) {
1605 ret = -ENOMEM;
b6cec8aa 1606 goto err1;
e51060f0
SH
1607 }
1608
de910bd9 1609 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
be9130cc 1610 ret = cma_acquire_dev(conn_id, listen_id);
a1a733f6 1611 if (ret)
b6cec8aa 1612 goto err2;
e51060f0
SH
1613
1614 conn_id->cm_id.ib = cm_id;
1615 cm_id->context = conn_id;
1616 cm_id->cm_handler = cma_ib_handler;
1617
25ae21a1
SH
1618 /*
1619 * Protect against the user destroying conn_id from another thread
1620 * until we're done accessing it.
1621 */
1622 atomic_inc(&conn_id->refcount);
a1b1b61f 1623 ret = conn_id->id.event_handler(&conn_id->id, &event);
b6cec8aa
SH
1624 if (ret)
1625 goto err3;
b6cec8aa
SH
1626 /*
1627 * Acquire mutex to prevent user executing rdma_destroy_id()
1628 * while we're accessing the cm_id.
1629 */
1630 mutex_lock(&lock);
dd5f03be
MB
1631 if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
1632 (conn_id->id.qp_type != IB_QPT_UD))
b6cec8aa
SH
1633 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
1634 mutex_unlock(&lock);
1635 mutex_unlock(&conn_id->handler_mutex);
1636 mutex_unlock(&listen_id->handler_mutex);
25ae21a1 1637 cma_deref_id(conn_id);
b6cec8aa 1638 return 0;
a1a733f6 1639
b6cec8aa
SH
1640err3:
1641 cma_deref_id(conn_id);
a1a733f6
KK
1642 /* Destroy the CM ID by returning a non-zero value. */
1643 conn_id->cm_id.ib = NULL;
b6cec8aa 1644err2:
550e5ca7 1645 cma_exch(conn_id, RDMA_CM_DESTROYING);
de910bd9 1646 mutex_unlock(&conn_id->handler_mutex);
b6cec8aa 1647err1:
de910bd9 1648 mutex_unlock(&listen_id->handler_mutex);
b6cec8aa
SH
1649 if (conn_id)
1650 rdma_destroy_id(&conn_id->id);
e51060f0
SH
1651 return ret;
1652}
1653
cf53936f 1654__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
e51060f0 1655{
496ce3ce
SH
1656 if (addr->sa_family == AF_IB)
1657 return ((struct sockaddr_ib *) addr)->sib_sid;
1658
cf53936f 1659 return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
e51060f0 1660}
cf53936f 1661EXPORT_SYMBOL(rdma_get_service_id);
e51060f0
SH
1662
1663static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
1664 struct ib_cm_compare_data *compare)
1665{
1666 struct cma_hdr *cma_data, *cma_mask;
1b90c137 1667 __be32 ip4_addr;
e51060f0
SH
1668 struct in6_addr ip6_addr;
1669
1670 memset(compare, 0, sizeof *compare);
1671 cma_data = (void *) compare->data;
1672 cma_mask = (void *) compare->mask;
e51060f0
SH
1673
1674 switch (addr->sa_family) {
1675 case AF_INET:
1676 ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
01602f11
SH
1677 cma_set_ip_ver(cma_data, 4);
1678 cma_set_ip_ver(cma_mask, 0xF);
1679 if (!cma_any_addr(addr)) {
1680 cma_data->dst_addr.ip4.addr = ip4_addr;
1681 cma_mask->dst_addr.ip4.addr = htonl(~0);
e51060f0
SH
1682 }
1683 break;
1684 case AF_INET6:
1685 ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
01602f11
SH
1686 cma_set_ip_ver(cma_data, 6);
1687 cma_set_ip_ver(cma_mask, 0xF);
1688 if (!cma_any_addr(addr)) {
1689 cma_data->dst_addr.ip6 = ip6_addr;
1690 memset(&cma_mask->dst_addr.ip6, 0xFF,
1691 sizeof cma_mask->dst_addr.ip6);
e51060f0
SH
1692 }
1693 break;
1694 default:
1695 break;
1696 }
1697}
1698
07ebafba
TT
1699static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
1700{
1701 struct rdma_id_private *id_priv = iw_id->context;
a1b1b61f 1702 struct rdma_cm_event event;
07ebafba 1703 int ret = 0;
24d44a39
SW
1704 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
1705 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
07ebafba 1706
550e5ca7 1707 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
be65f086 1708 return 0;
07ebafba 1709
be65f086 1710 memset(&event, 0, sizeof event);
07ebafba
TT
1711 switch (iw_event->event) {
1712 case IW_CM_EVENT_CLOSE:
a1b1b61f 1713 event.event = RDMA_CM_EVENT_DISCONNECTED;
07ebafba
TT
1714 break;
1715 case IW_CM_EVENT_CONNECT_REPLY:
24d44a39
SW
1716 memcpy(cma_src_addr(id_priv), laddr,
1717 rdma_addr_size(laddr));
1718 memcpy(cma_dst_addr(id_priv), raddr,
1719 rdma_addr_size(raddr));
881a045f
SW
1720 switch (iw_event->status) {
1721 case 0:
a1b1b61f 1722 event.event = RDMA_CM_EVENT_ESTABLISHED;
3ebeebc3
KS
1723 event.param.conn.initiator_depth = iw_event->ird;
1724 event.param.conn.responder_resources = iw_event->ord;
881a045f
SW
1725 break;
1726 case -ECONNRESET:
1727 case -ECONNREFUSED:
1728 event.event = RDMA_CM_EVENT_REJECTED;
1729 break;
1730 case -ETIMEDOUT:
1731 event.event = RDMA_CM_EVENT_UNREACHABLE;
1732 break;
1733 default:
1734 event.event = RDMA_CM_EVENT_CONNECT_ERROR;
1735 break;
1736 }
07ebafba
TT
1737 break;
1738 case IW_CM_EVENT_ESTABLISHED:
a1b1b61f 1739 event.event = RDMA_CM_EVENT_ESTABLISHED;
3ebeebc3
KS
1740 event.param.conn.initiator_depth = iw_event->ird;
1741 event.param.conn.responder_resources = iw_event->ord;
07ebafba
TT
1742 break;
1743 default:
1744 BUG_ON(1);
1745 }
1746
a1b1b61f
SH
1747 event.status = iw_event->status;
1748 event.param.conn.private_data = iw_event->private_data;
1749 event.param.conn.private_data_len = iw_event->private_data_len;
1750 ret = id_priv->id.event_handler(&id_priv->id, &event);
07ebafba
TT
1751 if (ret) {
1752 /* Destroy the CM ID by returning a non-zero value. */
1753 id_priv->cm_id.iw = NULL;
550e5ca7 1754 cma_exch(id_priv, RDMA_CM_DESTROYING);
de910bd9 1755 mutex_unlock(&id_priv->handler_mutex);
07ebafba
TT
1756 rdma_destroy_id(&id_priv->id);
1757 return ret;
1758 }
1759
de910bd9 1760 mutex_unlock(&id_priv->handler_mutex);
07ebafba
TT
1761 return ret;
1762}
1763
1764static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1765 struct iw_cm_event *iw_event)
1766{
1767 struct rdma_cm_id *new_cm_id;
1768 struct rdma_id_private *listen_id, *conn_id;
a1b1b61f 1769 struct rdma_cm_event event;
07ebafba 1770 int ret;
8d8293cf 1771 struct ib_device_attr attr;
24d44a39
SW
1772 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
1773 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
07ebafba
TT
1774
1775 listen_id = cm_id->context;
550e5ca7 1776 if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
8aa08602 1777 return -ECONNABORTED;
07ebafba
TT
1778
1779 /* Create a new RDMA id for the new IW CM ID */
1780 new_cm_id = rdma_create_id(listen_id->id.event_handler,
1781 listen_id->id.context,
b26f9b99 1782 RDMA_PS_TCP, IB_QPT_RC);
10f32065 1783 if (IS_ERR(new_cm_id)) {
07ebafba
TT
1784 ret = -ENOMEM;
1785 goto out;
1786 }
1787 conn_id = container_of(new_cm_id, struct rdma_id_private, id);
de910bd9 1788 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
550e5ca7 1789 conn_id->state = RDMA_CM_CONNECT;
07ebafba 1790
dd5f03be 1791 ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
07ebafba 1792 if (ret) {
de910bd9 1793 mutex_unlock(&conn_id->handler_mutex);
07ebafba
TT
1794 rdma_destroy_id(new_cm_id);
1795 goto out;
1796 }
1797
be9130cc 1798 ret = cma_acquire_dev(conn_id, listen_id);
07ebafba 1799 if (ret) {
de910bd9 1800 mutex_unlock(&conn_id->handler_mutex);
07ebafba
TT
1801 rdma_destroy_id(new_cm_id);
1802 goto out;
1803 }
1804
1805 conn_id->cm_id.iw = cm_id;
1806 cm_id->context = conn_id;
1807 cm_id->cm_handler = cma_iw_handler;
1808
24d44a39
SW
1809 memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
1810 memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
07ebafba 1811
8d8293cf
SW
1812 ret = ib_query_device(conn_id->id.device, &attr);
1813 if (ret) {
de910bd9 1814 mutex_unlock(&conn_id->handler_mutex);
8d8293cf
SW
1815 rdma_destroy_id(new_cm_id);
1816 goto out;
1817 }
1818
a1b1b61f
SH
1819 memset(&event, 0, sizeof event);
1820 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
1821 event.param.conn.private_data = iw_event->private_data;
1822 event.param.conn.private_data_len = iw_event->private_data_len;
3ebeebc3
KS
1823 event.param.conn.initiator_depth = iw_event->ird;
1824 event.param.conn.responder_resources = iw_event->ord;
25ae21a1
SH
1825
1826 /*
1827 * Protect against the user destroying conn_id from another thread
1828 * until we're done accessing it.
1829 */
1830 atomic_inc(&conn_id->refcount);
a1b1b61f 1831 ret = conn_id->id.event_handler(&conn_id->id, &event);
07ebafba
TT
1832 if (ret) {
1833 /* User wants to destroy the CM ID */
1834 conn_id->cm_id.iw = NULL;
550e5ca7 1835 cma_exch(conn_id, RDMA_CM_DESTROYING);
de910bd9 1836 mutex_unlock(&conn_id->handler_mutex);
25ae21a1 1837 cma_deref_id(conn_id);
07ebafba 1838 rdma_destroy_id(&conn_id->id);
de910bd9 1839 goto out;
07ebafba
TT
1840 }
1841
de910bd9 1842 mutex_unlock(&conn_id->handler_mutex);
25ae21a1 1843 cma_deref_id(conn_id);
de910bd9 1844
07ebafba 1845out:
de910bd9 1846 mutex_unlock(&listen_id->handler_mutex);
07ebafba
TT
1847 return ret;
1848}
1849
e51060f0
SH
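/*
 * Start an IB CM listen for this id.  A wildcard listen without the
 * AF-only option listens on the service ID alone; otherwise private data
 * compare masks are set up so the IB CM only hands us REQs whose RDMA CM
 * header matches this listener's address family and destination IP.
 */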
1850static int cma_ib_listen(struct rdma_id_private *id_priv)
1851{
1852 struct ib_cm_compare_data compare_data;
1853 struct sockaddr *addr;
0c9361fc 1854 struct ib_cm_id *id;
e51060f0
SH
1855 __be64 svc_id;
1856 int ret;
1857
0c9361fc
JM
1858 id = ib_create_cm_id(id_priv->id.device, cma_req_handler, id_priv);
1859 if (IS_ERR(id))
1860 return PTR_ERR(id);
1861
1862 id_priv->cm_id.ib = id;
e51060f0 1863
f4753834 1864 addr = cma_src_addr(id_priv);
cf53936f 1865 svc_id = rdma_get_service_id(&id_priv->id, addr);
406b6a25 1866 if (cma_any_addr(addr) && !id_priv->afonly)
e51060f0
SH
1867 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
1868 else {
1869 cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
1870 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
1871 }
1872
1873 if (ret) {
1874 ib_destroy_cm_id(id_priv->cm_id.ib);
1875 id_priv->cm_id.ib = NULL;
1876 }
1877
1878 return ret;
1879}
1880
07ebafba
TT
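/*
 * Create the iw_cm_id for a listening rdma_cm_id, copy the TOS and bound
 * local address into it, and start the iWARP listen with the requested
 * backlog.
 */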
1881static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
1882{
1883 int ret;
0c9361fc
JM
1884 struct iw_cm_id *id;
1885
1886 id = iw_create_cm_id(id_priv->id.device,
1887 iw_conn_req_handler,
1888 id_priv);
1889 if (IS_ERR(id))
1890 return PTR_ERR(id);
07ebafba 1891
68cdba06 1892 id->tos = id_priv->tos;
0c9361fc 1893 id_priv->cm_id.iw = id;
07ebafba 1894
24d44a39
SW
1895 memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
1896 rdma_addr_size(cma_src_addr(id_priv)));
07ebafba
TT
1897
1898 ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
1899
1900 if (ret) {
1901 iw_destroy_cm_id(id_priv->cm_id.iw);
1902 id_priv->cm_id.iw = NULL;
1903 }
1904
1905 return ret;
1906}
1907
e51060f0
SH
1908static int cma_listen_handler(struct rdma_cm_id *id,
1909 struct rdma_cm_event *event)
1910{
1911 struct rdma_id_private *id_priv = id->context;
1912
1913 id->context = id_priv->id.context;
1914 id->event_handler = id_priv->id.event_handler;
1915 return id_priv->id.event_handler(id, event);
1916}
1917
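/*
 * Clone a wildcard listen onto one RDMA device.  The clone forwards events
 * to the user's handler through cma_listen_handler(), inherits the bound
 * source address and AF-only setting, and is attached to the device before
 * rdma_listen() is called on it.
 */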
1918static void cma_listen_on_dev(struct rdma_id_private *id_priv,
1919 struct cma_device *cma_dev)
1920{
1921 struct rdma_id_private *dev_id_priv;
1922 struct rdma_cm_id *id;
1923 int ret;
1924
72219cea 1925 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
94d0c939
SH
1926 return;
1927
b26f9b99
SH
1928 id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
1929 id_priv->id.qp_type);
e51060f0
SH
1930 if (IS_ERR(id))
1931 return;
1932
1933 dev_id_priv = container_of(id, struct rdma_id_private, id);
1934
550e5ca7 1935 dev_id_priv->state = RDMA_CM_ADDR_BOUND;
f4753834
SH
1936 memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
1937 rdma_addr_size(cma_src_addr(id_priv)));
e51060f0
SH
1938
1939 cma_attach_to_dev(dev_id_priv, cma_dev);
1940 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
d02d1f53
SH
1941 atomic_inc(&id_priv->refcount);
1942 dev_id_priv->internal_id = 1;
5b0ec991 1943 dev_id_priv->afonly = id_priv->afonly;
e51060f0
SH
1944
1945 ret = rdma_listen(id, id_priv->backlog);
1946 if (ret)
d02d1f53 1947 printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
468f2239 1948 "listening on device %s\n", ret, cma_dev->device->name);
e51060f0
SH
1949}
1950
1951static void cma_listen_on_all(struct rdma_id_private *id_priv)
1952{
1953 struct cma_device *cma_dev;
1954
1955 mutex_lock(&lock);
1956 list_add_tail(&id_priv->list, &listen_any_list);
1957 list_for_each_entry(cma_dev, &dev_list, list)
1958 cma_listen_on_dev(id_priv, cma_dev);
1959 mutex_unlock(&lock);
1960}
1961
a81c994d
SH
1962void rdma_set_service_type(struct rdma_cm_id *id, int tos)
1963{
1964 struct rdma_id_private *id_priv;
1965
1966 id_priv = container_of(id, struct rdma_id_private, id);
1967 id_priv->tos = (u8) tos;
1968}
1969EXPORT_SYMBOL(rdma_set_service_type);
1970
e51060f0
SH
1971static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
1972 void *context)
1973{
1974 struct cma_work *work = context;
1975 struct rdma_route *route;
1976
1977 route = &work->id->id.route;
1978
1979 if (!status) {
1980 route->num_paths = 1;
1981 *route->path_rec = *path_rec;
1982 } else {
550e5ca7
NM
1983 work->old_state = RDMA_CM_ROUTE_QUERY;
1984 work->new_state = RDMA_CM_ADDR_RESOLVED;
e51060f0 1985 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
8f0472d3 1986 work->event.status = status;
e51060f0
SH
1987 }
1988
1989 queue_work(cma_wq, &work->work);
1990}
1991
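/*
 * Issue the SA path record query for an IB route.  SGID/DGID, pkey and the
 * service ID are always part of the query; a QoS class (from the id's TOS)
 * or traffic class (from the IPv6/AF_IB flow label) is added per address
 * family.  cma_query_handler() receives the result and queues the
 * follow-up work on cma_wq.
 */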
1992static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
1993 struct cma_work *work)
1994{
f4753834 1995 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
e51060f0 1996 struct ib_sa_path_rec path_rec;
a81c994d
SH
1997 ib_sa_comp_mask comp_mask;
1998 struct sockaddr_in6 *sin6;
f68194ca 1999 struct sockaddr_ib *sib;
e51060f0
SH
2000
2001 memset(&path_rec, 0, sizeof path_rec);
f4753834
SH
2002 rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
2003 rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
2004 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
e51060f0 2005 path_rec.numb_path = 1;
962063e6 2006 path_rec.reversible = 1;
cf53936f 2007 path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
a81c994d
SH
2008
2009 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
2010 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
2011 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
2012
f68194ca
SH
2013 switch (cma_family(id_priv)) {
2014 case AF_INET:
a81c994d
SH
2015 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
2016 comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
f68194ca
SH
2017 break;
2018 case AF_INET6:
f4753834 2019 sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
a81c994d
SH
2020 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
2021 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
f68194ca
SH
2022 break;
2023 case AF_IB:
2024 sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
2025 path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
2026 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
2027 break;
a81c994d 2028 }
e51060f0 2029
c1a0b23b 2030 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
a81c994d
SH
2031 id_priv->id.port_num, &path_rec,
2032 comp_mask, timeout_ms,
2033 GFP_KERNEL, cma_query_handler,
2034 work, &id_priv->query);
e51060f0
SH
2035
2036 return (id_priv->query_id < 0) ? id_priv->query_id : 0;
2037}
2038
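/*
 * Generic deferred event delivery: move the id from work->old_state to
 * work->new_state and report work->event to the user.  The event is
 * dropped if the state transition no longer applies, and the id is
 * destroyed if the user's handler returns non-zero.
 */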
c4028958 2039static void cma_work_handler(struct work_struct *_work)
e51060f0 2040{
c4028958 2041 struct cma_work *work = container_of(_work, struct cma_work, work);
e51060f0
SH
2042 struct rdma_id_private *id_priv = work->id;
2043 int destroy = 0;
2044
de910bd9 2045 mutex_lock(&id_priv->handler_mutex);
e51060f0
SH
2046 if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
2047 goto out;
2048
2049 if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
550e5ca7 2050 cma_exch(id_priv, RDMA_CM_DESTROYING);
e51060f0
SH
2051 destroy = 1;
2052 }
2053out:
de910bd9 2054 mutex_unlock(&id_priv->handler_mutex);
e51060f0
SH
2055 cma_deref_id(id_priv);
2056 if (destroy)
2057 rdma_destroy_id(&id_priv->id);
2058 kfree(work);
2059}
2060
dd5bdff8
OG
2061static void cma_ndev_work_handler(struct work_struct *_work)
2062{
2063 struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
2064 struct rdma_id_private *id_priv = work->id;
2065 int destroy = 0;
2066
2067 mutex_lock(&id_priv->handler_mutex);
550e5ca7
NM
2068 if (id_priv->state == RDMA_CM_DESTROYING ||
2069 id_priv->state == RDMA_CM_DEVICE_REMOVAL)
dd5bdff8
OG
2070 goto out;
2071
2072 if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
550e5ca7 2073 cma_exch(id_priv, RDMA_CM_DESTROYING);
dd5bdff8
OG
2074 destroy = 1;
2075 }
2076
2077out:
2078 mutex_unlock(&id_priv->handler_mutex);
2079 cma_deref_id(id_priv);
2080 if (destroy)
2081 rdma_destroy_id(&id_priv->id);
2082 kfree(work);
2083}
2084
e51060f0
SH
2085static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
2086{
2087 struct rdma_route *route = &id_priv->id.route;
2088 struct cma_work *work;
2089 int ret;
2090
2091 work = kzalloc(sizeof *work, GFP_KERNEL);
2092 if (!work)
2093 return -ENOMEM;
2094
2095 work->id = id_priv;
c4028958 2096 INIT_WORK(&work->work, cma_work_handler);
550e5ca7
NM
2097 work->old_state = RDMA_CM_ROUTE_QUERY;
2098 work->new_state = RDMA_CM_ROUTE_RESOLVED;
e51060f0
SH
2099 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
2100
2101 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
2102 if (!route->path_rec) {
2103 ret = -ENOMEM;
2104 goto err1;
2105 }
2106
2107 ret = cma_query_ib_route(id_priv, timeout_ms, work);
2108 if (ret)
2109 goto err2;
2110
2111 return 0;
2112err2:
2113 kfree(route->path_rec);
2114 route->path_rec = NULL;
2115err1:
2116 kfree(work);
2117 return ret;
2118}
2119
2120int rdma_set_ib_paths(struct rdma_cm_id *id,
2121 struct ib_sa_path_rec *path_rec, int num_paths)
2122{
2123 struct rdma_id_private *id_priv;
2124 int ret;
2125
2126 id_priv = container_of(id, struct rdma_id_private, id);
550e5ca7
NM
2127 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
2128 RDMA_CM_ROUTE_RESOLVED))
e51060f0
SH
2129 return -EINVAL;
2130
9893e742
JL
2131 id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
2132 GFP_KERNEL);
e51060f0
SH
2133 if (!id->route.path_rec) {
2134 ret = -ENOMEM;
2135 goto err;
2136 }
2137
ae2d9293 2138 id->route.num_paths = num_paths;
e51060f0
SH
2139 return 0;
2140err:
550e5ca7 2141 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
e51060f0
SH
2142 return ret;
2143}
2144EXPORT_SYMBOL(rdma_set_ib_paths);
2145
07ebafba
TT
2146static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
2147{
2148 struct cma_work *work;
2149
2150 work = kzalloc(sizeof *work, GFP_KERNEL);
2151 if (!work)
2152 return -ENOMEM;
2153
2154 work->id = id_priv;
c4028958 2155 INIT_WORK(&work->work, cma_work_handler);
550e5ca7
NM
2156 work->old_state = RDMA_CM_ROUTE_QUERY;
2157 work->new_state = RDMA_CM_ROUTE_RESOLVED;
07ebafba
TT
2158 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
2159 queue_work(cma_wq, &work->work);
2160 return 0;
2161}
2162
eb072c4b
EP
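/*
 * Map an IP TOS value to an egress priority for RoCE.  The net_device's
 * priority-to-traffic-class mapping is preferred; for VLAN devices the
 * VLAN egress QoS map is used as a fallback, otherwise 0.
 */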
2163static int iboe_tos_to_sl(struct net_device *ndev, int tos)
2164{
2165 int prio;
2166 struct net_device *dev;
2167
2168 prio = rt_tos2priority(tos);
2169 dev = ndev->priv_flags & IFF_802_1Q_VLAN ?
2170 vlan_dev_real_dev(ndev) : ndev;
2171
2172 if (dev->num_tc)
2173 return netdev_get_prio_tc_map(dev, prio);
2174
2175#if IS_ENABLED(CONFIG_VLAN_8021Q)
2176 if (ndev->priv_flags & IFF_802_1Q_VLAN)
2177 return (vlan_dev_get_egress_qos_mask(ndev, prio) &
2178 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
2179#endif
2180 return 0;
2181}
2182
3c86aa70
EC
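/*
 * RoCE (IBoE) routes are not resolved through an SA.  The single path
 * record is built locally from the bound net_device: GIDs come from the IP
 * addresses, the SL from iboe_tos_to_sl(), and MTU/rate from the netdev.
 * Completion is reported asynchronously through cma_work_handler().
 */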
2183static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
2184{
2185 struct rdma_route *route = &id_priv->id.route;
2186 struct rdma_addr *addr = &route->addr;
2187 struct cma_work *work;
2188 int ret;
3c86aa70 2189 struct net_device *ndev = NULL;
dd5f03be 2190
3c86aa70 2191
3c86aa70
EC
2192 work = kzalloc(sizeof *work, GFP_KERNEL);
2193 if (!work)
2194 return -ENOMEM;
2195
2196 work->id = id_priv;
2197 INIT_WORK(&work->work, cma_work_handler);
2198
2199 route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
2200 if (!route->path_rec) {
2201 ret = -ENOMEM;
2202 goto err1;
2203 }
2204
2205 route->num_paths = 1;
2206
3c86aa70
EC
2207 if (addr->dev_addr.bound_dev_if)
2208 ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
2209 if (!ndev) {
2210 ret = -ENODEV;
2211 goto err2;
2212 }
2213
dd5f03be
MB
2214 route->path_rec->vlan_id = rdma_vlan_dev_vlan_id(ndev);
2215 memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);
2216 memcpy(route->path_rec->smac, ndev->dev_addr, ndev->addr_len);
af7bd463 2217
7b85627b
MS
2218 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
2219 &route->path_rec->sgid);
2220 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
2221 &route->path_rec->dgid);
af7bd463
EC
2222
2223 route->path_rec->hop_limit = 1;
2224 route->path_rec->reversible = 1;
2225 route->path_rec->pkey = cpu_to_be16(0xffff);
2226 route->path_rec->mtu_selector = IB_SA_EQ;
eb072c4b 2227 route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos);
3c86aa70
EC
2228 route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
2229 route->path_rec->rate_selector = IB_SA_EQ;
2230 route->path_rec->rate = iboe_get_rate(ndev);
2231 dev_put(ndev);
2232 route->path_rec->packet_life_time_selector = IB_SA_EQ;
2233 route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
2234 if (!route->path_rec->mtu) {
2235 ret = -EINVAL;
2236 goto err2;
2237 }
2238
550e5ca7
NM
2239 work->old_state = RDMA_CM_ROUTE_QUERY;
2240 work->new_state = RDMA_CM_ROUTE_RESOLVED;
3c86aa70
EC
2241 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
2242 work->event.status = 0;
2243
2244 queue_work(cma_wq, &work->work);
2245
2246 return 0;
2247
2248err2:
2249 kfree(route->path_rec);
2250 route->path_rec = NULL;
2251err1:
2252 kfree(work);
2253 return ret;
2254}
2255
e51060f0
SH
2256int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
2257{
2258 struct rdma_id_private *id_priv;
2259 int ret;
2260
2261 id_priv = container_of(id, struct rdma_id_private, id);
550e5ca7 2262 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
e51060f0
SH
2263 return -EINVAL;
2264
2265 atomic_inc(&id_priv->refcount);
fe53ba2f 2266 if (rdma_cap_ib_sa(id->device, id->port_num))
c72f2189 2267 ret = cma_resolve_ib_route(id_priv, timeout_ms);
5d9fb044 2268 else if (rdma_protocol_roce(id->device, id->port_num))
c72f2189
MW
2269 ret = cma_resolve_iboe_route(id_priv);
2270 else if (rdma_protocol_iwarp(id->device, id->port_num))
07ebafba 2271 ret = cma_resolve_iw_route(id_priv, timeout_ms);
c72f2189 2272 else
e51060f0 2273 ret = -ENOSYS;
c72f2189 2274
e51060f0
SH
2275 if (ret)
2276 goto err;
2277
2278 return 0;
2279err:
550e5ca7 2280 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
e51060f0
SH
2281 cma_deref_id(id_priv);
2282 return ret;
2283}
2284EXPORT_SYMBOL(rdma_resolve_route);
2285
6a3e362d
SH
2286static void cma_set_loopback(struct sockaddr *addr)
2287{
2288 switch (addr->sa_family) {
2289 case AF_INET:
2290 ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
2291 break;
2292 case AF_INET6:
2293 ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
2294 0, 0, 0, htonl(1));
2295 break;
2296 default:
2297 ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
2298 0, 0, 0, htonl(1));
2299 break;
2300 }
2301}
2302
e51060f0
SH
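/*
 * Bind an unbound id to a local device for loopback: pick a suitable
 * device (preferring one with an active port), take GID index 0 and pkey
 * index 0 from that port, and set the source address to the loopback
 * address of the id's family.
 */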
2303static int cma_bind_loopback(struct rdma_id_private *id_priv)
2304{
b0569e40 2305 struct cma_device *cma_dev, *cur_dev;
e51060f0 2306 struct ib_port_attr port_attr;
f0ee3404 2307 union ib_gid gid;
e51060f0
SH
2308 u16 pkey;
2309 int ret;
2310 u8 p;
2311
b0569e40 2312 cma_dev = NULL;
e51060f0 2313 mutex_lock(&lock);
b0569e40
SH
2314 list_for_each_entry(cur_dev, &dev_list, list) {
2315 if (cma_family(id_priv) == AF_IB &&
72219cea 2316 !rdma_cap_ib_cm(cur_dev->device, 1))
b0569e40
SH
2317 continue;
2318
2319 if (!cma_dev)
2320 cma_dev = cur_dev;
2321
2322 for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
2323 if (!ib_query_port(cur_dev->device, p, &port_attr) &&
2324 port_attr.state == IB_PORT_ACTIVE) {
2325 cma_dev = cur_dev;
2326 goto port_found;
2327 }
2328 }
2329 }
2330
2331 if (!cma_dev) {
e82153b5
KK
2332 ret = -ENODEV;
2333 goto out;
2334 }
e51060f0 2335
e82153b5 2336 p = 1;
e51060f0
SH
2337
2338port_found:
f0ee3404 2339 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
e51060f0
SH
2340 if (ret)
2341 goto out;
2342
2343 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
2344 if (ret)
2345 goto out;
2346
6f8372b6 2347 id_priv->id.route.addr.dev_addr.dev_type =
21655afc 2348 (rdma_protocol_ib(cma_dev->device, p)) ?
6f8372b6
SH
2349 ARPHRD_INFINIBAND : ARPHRD_ETHER;
2350
2351 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
e51060f0
SH
2352 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
2353 id_priv->id.port_num = p;
2354 cma_attach_to_dev(id_priv, cma_dev);
f4753834 2355 cma_set_loopback(cma_src_addr(id_priv));
e51060f0
SH
2356out:
2357 mutex_unlock(&lock);
2358 return ret;
2359}
2360
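/*
 * Completion callback for rdma_resolve_ip().  The resolved source address
 * is recorded and, if needed, a device is acquired for the id; the user
 * then sees RDMA_CM_EVENT_ADDR_RESOLVED, or ADDR_ERROR if resolution or
 * device acquisition failed.
 */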
2361static void addr_handler(int status, struct sockaddr *src_addr,
2362 struct rdma_dev_addr *dev_addr, void *context)
2363{
2364 struct rdma_id_private *id_priv = context;
a1b1b61f 2365 struct rdma_cm_event event;
e51060f0 2366
a1b1b61f 2367 memset(&event, 0, sizeof event);
de910bd9 2368 mutex_lock(&id_priv->handler_mutex);
550e5ca7
NM
2369 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
2370 RDMA_CM_ADDR_RESOLVED))
61a73c70 2371 goto out;
61a73c70 2372
7b85627b 2373 memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
61a73c70 2374 if (!status && !id_priv->cma_dev)
be9130cc 2375 status = cma_acquire_dev(id_priv, NULL);
e51060f0
SH
2376
2377 if (status) {
550e5ca7
NM
2378 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
2379 RDMA_CM_ADDR_BOUND))
e51060f0 2380 goto out;
a1b1b61f
SH
2381 event.event = RDMA_CM_EVENT_ADDR_ERROR;
2382 event.status = status;
7b85627b 2383 } else
a1b1b61f 2384 event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
e51060f0 2385
a1b1b61f 2386 if (id_priv->id.event_handler(&id_priv->id, &event)) {
550e5ca7 2387 cma_exch(id_priv, RDMA_CM_DESTROYING);
de910bd9 2388 mutex_unlock(&id_priv->handler_mutex);
e51060f0
SH
2389 cma_deref_id(id_priv);
2390 rdma_destroy_id(&id_priv->id);
2391 return;
2392 }
2393out:
de910bd9 2394 mutex_unlock(&id_priv->handler_mutex);
e51060f0
SH
2395 cma_deref_id(id_priv);
2396}
2397
2398static int cma_resolve_loopback(struct rdma_id_private *id_priv)
2399{
2400 struct cma_work *work;
f0ee3404 2401 union ib_gid gid;
e51060f0
SH
2402 int ret;
2403
2404 work = kzalloc(sizeof *work, GFP_KERNEL);
2405 if (!work)
2406 return -ENOMEM;
2407
2408 if (!id_priv->cma_dev) {
2409 ret = cma_bind_loopback(id_priv);
2410 if (ret)
2411 goto err;
2412 }
2413
6f8372b6
SH
2414 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
2415 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
e51060f0 2416
e51060f0 2417 work->id = id_priv;
c4028958 2418 INIT_WORK(&work->work, cma_work_handler);
550e5ca7
NM
2419 work->old_state = RDMA_CM_ADDR_QUERY;
2420 work->new_state = RDMA_CM_ADDR_RESOLVED;
e51060f0
SH
2421 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
2422 queue_work(cma_wq, &work->work);
2423 return 0;
2424err:
2425 kfree(work);
2426 return ret;
2427}
2428
f17df3b0
SH
2429static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
2430{
2431 struct cma_work *work;
2432 int ret;
2433
2434 work = kzalloc(sizeof *work, GFP_KERNEL);
2435 if (!work)
2436 return -ENOMEM;
2437
2438 if (!id_priv->cma_dev) {
2439 ret = cma_resolve_ib_dev(id_priv);
2440 if (ret)
2441 goto err;
2442 }
2443
2444 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
2445 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
2446
2447 work->id = id_priv;
2448 INIT_WORK(&work->work, cma_work_handler);
2449 work->old_state = RDMA_CM_ADDR_QUERY;
2450 work->new_state = RDMA_CM_ADDR_RESOLVED;
2451 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
2452 queue_work(cma_wq, &work->work);
2453 return 0;
2454err:
2455 kfree(work);
2456 return ret;
2457}
2458
e51060f0
SH
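/*
 * Bind before resolving if the caller did not: use a wildcard source
 * address of the destination's family, inheriting the IPv6 scope id for
 * link-local destinations or the pkey for AF_IB destinations.
 */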
2459static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2460 struct sockaddr *dst_addr)
2461{
d14714df
SH
2462 if (!src_addr || !src_addr->sa_family) {
2463 src_addr = (struct sockaddr *) &id->route.addr.src_addr;
f17df3b0
SH
2464 src_addr->sa_family = dst_addr->sa_family;
2465 if (dst_addr->sa_family == AF_INET6) {
6c26a771
SB
2466 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
2467 struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
2468 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
2469 if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
2470 id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
f17df3b0
SH
2471 } else if (dst_addr->sa_family == AF_IB) {
2472 ((struct sockaddr_ib *) src_addr)->sib_pkey =
2473 ((struct sockaddr_ib *) dst_addr)->sib_pkey;
d14714df
SH
2474 }
2475 }
2476 return rdma_bind_addr(id, src_addr);
e51060f0
SH
2477}
2478
2479int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2480 struct sockaddr *dst_addr, int timeout_ms)
2481{
2482 struct rdma_id_private *id_priv;
2483 int ret;
2484
2485 id_priv = container_of(id, struct rdma_id_private, id);
550e5ca7 2486 if (id_priv->state == RDMA_CM_IDLE) {
e51060f0
SH
2487 ret = cma_bind_addr(id, src_addr, dst_addr);
2488 if (ret)
2489 return ret;
2490 }
2491
4ae7152e
SH
2492 if (cma_family(id_priv) != dst_addr->sa_family)
2493 return -EINVAL;
2494
550e5ca7 2495 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
e51060f0
SH
2496 return -EINVAL;
2497
2498 atomic_inc(&id_priv->refcount);
f4753834 2499 memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
f17df3b0 2500 if (cma_any_addr(dst_addr)) {
e51060f0 2501 ret = cma_resolve_loopback(id_priv);
f17df3b0
SH
2502 } else {
2503 if (dst_addr->sa_family == AF_IB) {
2504 ret = cma_resolve_ib_addr(id_priv);
2505 } else {
2506 ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv),
2507 dst_addr, &id->route.addr.dev_addr,
2508 timeout_ms, addr_handler, id_priv);
2509 }
2510 }
e51060f0
SH
2511 if (ret)
2512 goto err;
2513
2514 return 0;
2515err:
550e5ca7 2516 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
e51060f0
SH
2517 cma_deref_id(id_priv);
2518 return ret;
2519}
2520EXPORT_SYMBOL(rdma_resolve_addr);
2521
a9bb7912
HS
2522int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
2523{
2524 struct rdma_id_private *id_priv;
2525 unsigned long flags;
2526 int ret;
2527
2528 id_priv = container_of(id, struct rdma_id_private, id);
2529 spin_lock_irqsave(&id_priv->lock, flags);
c8dea2f9 2530 if (reuse || id_priv->state == RDMA_CM_IDLE) {
a9bb7912
HS
2531 id_priv->reuseaddr = reuse;
2532 ret = 0;
2533 } else {
2534 ret = -EINVAL;
2535 }
2536 spin_unlock_irqrestore(&id_priv->lock, flags);
2537 return ret;
2538}
2539EXPORT_SYMBOL(rdma_set_reuseaddr);
2540
68602120
SH
2541int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
2542{
2543 struct rdma_id_private *id_priv;
2544 unsigned long flags;
2545 int ret;
2546
2547 id_priv = container_of(id, struct rdma_id_private, id);
2548 spin_lock_irqsave(&id_priv->lock, flags);
2549 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
2550 id_priv->options |= (1 << CMA_OPTION_AFONLY);
2551 id_priv->afonly = afonly;
2552 ret = 0;
2553 } else {
2554 ret = -EINVAL;
2555 }
2556 spin_unlock_irqrestore(&id_priv->lock, flags);
2557 return ret;
2558}
2559EXPORT_SYMBOL(rdma_set_afonly);
2560
e51060f0
SH
2561static void cma_bind_port(struct rdma_bind_list *bind_list,
2562 struct rdma_id_private *id_priv)
2563{
58afdcb7
SH
2564 struct sockaddr *addr;
2565 struct sockaddr_ib *sib;
2566 u64 sid, mask;
2567 __be16 port;
e51060f0 2568
f4753834 2569 addr = cma_src_addr(id_priv);
58afdcb7
SH
2570 port = htons(bind_list->port);
2571
2572 switch (addr->sa_family) {
2573 case AF_INET:
2574 ((struct sockaddr_in *) addr)->sin_port = port;
2575 break;
2576 case AF_INET6:
2577 ((struct sockaddr_in6 *) addr)->sin6_port = port;
2578 break;
2579 case AF_IB:
2580 sib = (struct sockaddr_ib *) addr;
2581 sid = be64_to_cpu(sib->sib_sid);
2582 mask = be64_to_cpu(sib->sib_sid_mask);
2583 sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
2584 sib->sib_sid_mask = cpu_to_be64(~0ULL);
2585 break;
2586 }
e51060f0
SH
2587 id_priv->bind_list = bind_list;
2588 hlist_add_head(&id_priv->node, &bind_list->owners);
2589}
2590
aac978e1
HE
2591static int cma_alloc_port(enum rdma_port_space ps,
2592 struct rdma_id_private *id_priv, unsigned short snum)
e51060f0
SH
2593{
2594 struct rdma_bind_list *bind_list;
3b069c5d 2595 int ret;
e51060f0 2596
cb164b8c 2597 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
e51060f0
SH
2598 if (!bind_list)
2599 return -ENOMEM;
2600
aac978e1 2601 ret = cma_ps_alloc(ps, bind_list, snum);
3b069c5d
TH
2602 if (ret < 0)
2603 goto err;
aedec080
SH
2604
2605 bind_list->ps = ps;
3b069c5d 2606 bind_list->port = (unsigned short)ret;
aedec080
SH
2607 cma_bind_port(bind_list, id_priv);
2608 return 0;
3b069c5d 2609err:
aedec080 2610 kfree(bind_list);
3b069c5d 2611 return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
aedec080 2612}
e51060f0 2613
aac978e1
HE
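/*
 * Pick an ephemeral port: start from a random offset within the local
 * port range, skip the most recently used port, and bind the id to the
 * first free port found.
 */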
2614static int cma_alloc_any_port(enum rdma_port_space ps,
2615 struct rdma_id_private *id_priv)
aedec080 2616{
5d7220e8
TH
2617 static unsigned int last_used_port;
2618 int low, high, remaining;
2619 unsigned int rover;
e51060f0 2620
0bbf87d8 2621 inet_get_local_port_range(&init_net, &low, &high);
5d7220e8 2622 remaining = (high - low) + 1;
63862b5b 2623 rover = prandom_u32() % remaining + low;
5d7220e8
TH
2624retry:
2625 if (last_used_port != rover &&
aac978e1 2626 !cma_ps_find(ps, (unsigned short)rover)) {
5d7220e8
TH
2627 int ret = cma_alloc_port(ps, id_priv, rover);
2628 /*
2629 * Remember previously used port number in order to avoid
2630 * re-using same port immediately after it is closed.
2631 */
2632 if (!ret)
2633 last_used_port = rover;
2634 if (ret != -EADDRNOTAVAIL)
2635 return ret;
e51060f0 2636 }
5d7220e8
TH
2637 if (--remaining) {
2638 rover++;
2639 if ((rover < low) || (rover > high))
2640 rover = low;
2641 goto retry;
2642 }
2643 return -EADDRNOTAVAIL;
e51060f0
SH
2644}
2645
a9bb7912
HS
2646/*
2647 * Check that the requested port is available. This is called when trying to
2648 * bind to a specific port, or when trying to listen on a bound port. In
2649 * the latter case, the provided id_priv may already be on the bind_list, but
2650 * we still need to check that it's okay to start listening.
2651 */
2652static int cma_check_port(struct rdma_bind_list *bind_list,
2653 struct rdma_id_private *id_priv, uint8_t reuseaddr)
e51060f0
SH
2654{
2655 struct rdma_id_private *cur_id;
43b752da 2656 struct sockaddr *addr, *cur_addr;
e51060f0 2657
f4753834 2658 addr = cma_src_addr(id_priv);
b67bfe0d 2659 hlist_for_each_entry(cur_id, &bind_list->owners, node) {
a9bb7912
HS
2660 if (id_priv == cur_id)
2661 continue;
3cd96564 2662
5b0ec991
SH
2663 if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
2664 cur_id->reuseaddr)
2665 continue;
e51060f0 2666
f4753834 2667 cur_addr = cma_src_addr(cur_id);
5b0ec991
SH
2668 if (id_priv->afonly && cur_id->afonly &&
2669 (addr->sa_family != cur_addr->sa_family))
2670 continue;
2671
2672 if (cma_any_addr(addr) || cma_any_addr(cur_addr))
2673 return -EADDRNOTAVAIL;
2674
2675 if (!cma_addr_cmp(addr, cur_addr))
2676 return -EADDRINUSE;
a9bb7912 2677 }
e51060f0
SH
2678 return 0;
2679}
2680
aac978e1
HE
2681static int cma_use_port(enum rdma_port_space ps,
2682 struct rdma_id_private *id_priv)
a9bb7912
HS
2683{
2684 struct rdma_bind_list *bind_list;
2685 unsigned short snum;
2686 int ret;
2687
f4753834 2688 snum = ntohs(cma_port(cma_src_addr(id_priv)));
a9bb7912
HS
2689 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
2690 return -EACCES;
2691
aac978e1 2692 bind_list = cma_ps_find(ps, snum);
a9bb7912
HS
2693 if (!bind_list) {
2694 ret = cma_alloc_port(ps, id_priv, snum);
2695 } else {
2696 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
2697 if (!ret)
2698 cma_bind_port(bind_list, id_priv);
2699 }
2700 return ret;
2701}
2702
2703static int cma_bind_listen(struct rdma_id_private *id_priv)
2704{
2705 struct rdma_bind_list *bind_list = id_priv->bind_list;
2706 int ret = 0;
2707
2708 mutex_lock(&lock);
2709 if (bind_list->owners.first->next)
2710 ret = cma_check_port(bind_list, id_priv, 0);
2711 mutex_unlock(&lock);
2712 return ret;
2713}
2714
aac978e1
HE
2715static enum rdma_port_space cma_select_inet_ps(
2716 struct rdma_id_private *id_priv)
e51060f0 2717{
e51060f0 2718 switch (id_priv->id.ps) {
e51060f0 2719 case RDMA_PS_TCP:
628e5f6d 2720 case RDMA_PS_UDP:
c8f6a362 2721 case RDMA_PS_IPOIB:
2d2e9415 2722 case RDMA_PS_IB:
aac978e1 2723 return id_priv->id.ps;
e51060f0 2724 default:
aac978e1
HE
2725
2726 return 0;
58afdcb7
SH
2727 }
2728}
2729
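/*
 * For AF_IB addresses the port space is carried in the service ID.  Match
 * the masked SID against the well-known IP port space prefixes (IB, TCP,
 * UDP) and, on a match, fold the bound port number back into sib_sid.
 */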
aac978e1 2730static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv)
58afdcb7 2731{
aac978e1 2732 enum rdma_port_space ps = 0;
58afdcb7
SH
2733 struct sockaddr_ib *sib;
2734 u64 sid_ps, mask, sid;
2735
f4753834 2736 sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
58afdcb7
SH
2737 mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
2738 sid = be64_to_cpu(sib->sib_sid) & mask;
2739
2740 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
2741 sid_ps = RDMA_IB_IP_PS_IB;
aac978e1 2742 ps = RDMA_PS_IB;
58afdcb7
SH
2743 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
2744 (sid == (RDMA_IB_IP_PS_TCP & mask))) {
2745 sid_ps = RDMA_IB_IP_PS_TCP;
aac978e1 2746 ps = RDMA_PS_TCP;
58afdcb7
SH
2747 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
2748 (sid == (RDMA_IB_IP_PS_UDP & mask))) {
2749 sid_ps = RDMA_IB_IP_PS_UDP;
aac978e1 2750 ps = RDMA_PS_UDP;
e51060f0
SH
2751 }
2752
58afdcb7
SH
2753 if (ps) {
2754 sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
2755 sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
2756 be64_to_cpu(sib->sib_sid_mask));
2757 }
2758 return ps;
2759}
2760
2761static int cma_get_port(struct rdma_id_private *id_priv)
2762{
aac978e1 2763 enum rdma_port_space ps;
58afdcb7
SH
2764 int ret;
2765
f4753834 2766 if (cma_family(id_priv) != AF_IB)
58afdcb7
SH
2767 ps = cma_select_inet_ps(id_priv);
2768 else
2769 ps = cma_select_ib_ps(id_priv);
2770 if (!ps)
2771 return -EPROTONOSUPPORT;
2772
e51060f0 2773 mutex_lock(&lock);
f4753834 2774 if (cma_any_port(cma_src_addr(id_priv)))
aedec080 2775 ret = cma_alloc_any_port(ps, id_priv);
e51060f0
SH
2776 else
2777 ret = cma_use_port(ps, id_priv);
2778 mutex_unlock(&lock);
2779
2780 return ret;
2781}
2782
d14714df
SH
2783static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
2784 struct sockaddr *addr)
2785{
d90f9b35 2786#if IS_ENABLED(CONFIG_IPV6)
d14714df
SH
2787 struct sockaddr_in6 *sin6;
2788
2789 if (addr->sa_family != AF_INET6)
2790 return 0;
2791
2792 sin6 = (struct sockaddr_in6 *) addr;
5462eddd
SK
2793
2794 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
2795 return 0;
2796
2797 if (!sin6->sin6_scope_id)
d14714df
SH
2798 return -EINVAL;
2799
2800 dev_addr->bound_dev_if = sin6->sin6_scope_id;
2801#endif
2802 return 0;
2803}
2804
a9bb7912
HS
2805int rdma_listen(struct rdma_cm_id *id, int backlog)
2806{
2807 struct rdma_id_private *id_priv;
2808 int ret;
2809
2810 id_priv = container_of(id, struct rdma_id_private, id);
550e5ca7 2811 if (id_priv->state == RDMA_CM_IDLE) {
f4753834
SH
2812 id->route.addr.src_addr.ss_family = AF_INET;
2813 ret = rdma_bind_addr(id, cma_src_addr(id_priv));
a9bb7912
HS
2814 if (ret)
2815 return ret;
2816 }
2817
550e5ca7 2818 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
a9bb7912
HS
2819 return -EINVAL;
2820
2821 if (id_priv->reuseaddr) {
2822 ret = cma_bind_listen(id_priv);
2823 if (ret)
2824 goto err;
2825 }
2826
2827 id_priv->backlog = backlog;
2828 if (id->device) {
72219cea 2829 if (rdma_cap_ib_cm(id->device, 1)) {
a9bb7912
HS
2830 ret = cma_ib_listen(id_priv);
2831 if (ret)
2832 goto err;
04215330 2833 } else if (rdma_cap_iw_cm(id->device, 1)) {
a9bb7912
HS
2834 ret = cma_iw_listen(id_priv, backlog);
2835 if (ret)
2836 goto err;
21655afc 2837 } else {
a9bb7912
HS
2838 ret = -ENOSYS;
2839 goto err;
2840 }
2841 } else
2842 cma_listen_on_all(id_priv);
2843
2844 return 0;
2845err:
2846 id_priv->backlog = 0;
550e5ca7 2847 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
a9bb7912
HS
2848 return ret;
2849}
2850EXPORT_SYMBOL(rdma_listen);
2851
e51060f0
SH
2852int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
2853{
2854 struct rdma_id_private *id_priv;
2855 int ret;
2856
680f920a
SH
2857 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
2858 addr->sa_family != AF_IB)
e51060f0
SH
2859 return -EAFNOSUPPORT;
2860
2861 id_priv = container_of(id, struct rdma_id_private, id);
550e5ca7 2862 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
e51060f0
SH
2863 return -EINVAL;
2864
d14714df
SH
2865 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
2866 if (ret)
2867 goto err1;
2868
7b85627b 2869 memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
8523c048 2870 if (!cma_any_addr(addr)) {
680f920a 2871 ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
e51060f0 2872 if (ret)
255d0c14
KK
2873 goto err1;
2874
be9130cc 2875 ret = cma_acquire_dev(id_priv, NULL);
255d0c14
KK
2876 if (ret)
2877 goto err1;
e51060f0
SH
2878 }
2879
68602120
SH
2880 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
2881 if (addr->sa_family == AF_INET)
2882 id_priv->afonly = 1;
5b0ec991 2883#if IS_ENABLED(CONFIG_IPV6)
68602120
SH
2884 else if (addr->sa_family == AF_INET6)
2885 id_priv->afonly = init_net.ipv6.sysctl.bindv6only;
5b0ec991 2886#endif
68602120 2887 }
e51060f0
SH
2888 ret = cma_get_port(id_priv);
2889 if (ret)
255d0c14 2890 goto err2;
e51060f0
SH
2891
2892 return 0;
255d0c14 2893err2:
a396d43a
SH
2894 if (id_priv->cma_dev)
2895 cma_release_dev(id_priv);
255d0c14 2896err1:
550e5ca7 2897 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
e51060f0
SH
2898 return ret;
2899}
2900EXPORT_SYMBOL(rdma_bind_addr);
2901
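/*
 * Build the RDMA CM header placed in front of the user's private data for
 * IB CM messages: CMA version, IP version, source/destination addresses
 * and the source port, taken from the id's bound addresses.
 */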
f4753834 2902static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
e51060f0 2903{
e51060f0 2904 struct cma_hdr *cma_hdr;
e51060f0 2905
01602f11
SH
2906 cma_hdr = hdr;
2907 cma_hdr->cma_version = CMA_VERSION;
f4753834 2908 if (cma_family(id_priv) == AF_INET) {
1f5175ad
AS
2909 struct sockaddr_in *src4, *dst4;
2910
f4753834
SH
2911 src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
2912 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);
1f5175ad 2913
01602f11
SH
2914 cma_set_ip_ver(cma_hdr, 4);
2915 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
2916 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
2917 cma_hdr->port = src4->sin_port;
e8160e15 2918 } else if (cma_family(id_priv) == AF_INET6) {
1f5175ad
AS
2919 struct sockaddr_in6 *src6, *dst6;
2920
f4753834
SH
2921 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
2922 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);
1f5175ad 2923
01602f11
SH
2924 cma_set_ip_ver(cma_hdr, 6);
2925 cma_hdr->src_addr.ip6 = src6->sin6_addr;
2926 cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
2927 cma_hdr->port = src6->sin6_port;
e51060f0
SH
2928 }
2929 return 0;
2930}
2931
628e5f6d
SH
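/*
 * Handle the SIDR reply for a UD-style connect.  On success the qkey is
 * recorded, an address handle is initialized from the resolved path and
 * the remote QPN/QKey are reported with RDMA_CM_EVENT_ESTABLISHED;
 * failures surface as UNREACHABLE or ADDR_ERROR.
 */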
2932static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
2933 struct ib_cm_event *ib_event)
2934{
2935 struct rdma_id_private *id_priv = cm_id->context;
2936 struct rdma_cm_event event;
2937 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
2938 int ret = 0;
2939
550e5ca7 2940 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
8aa08602 2941 return 0;
628e5f6d 2942
8aa08602 2943 memset(&event, 0, sizeof event);
628e5f6d
SH
2944 switch (ib_event->event) {
2945 case IB_CM_SIDR_REQ_ERROR:
2946 event.event = RDMA_CM_EVENT_UNREACHABLE;
2947 event.status = -ETIMEDOUT;
2948 break;
2949 case IB_CM_SIDR_REP_RECEIVED:
2950 event.param.ud.private_data = ib_event->private_data;
2951 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
2952 if (rep->status != IB_SIDR_SUCCESS) {
2953 event.event = RDMA_CM_EVENT_UNREACHABLE;
2954 event.status = ib_event->param.sidr_rep_rcvd.status;
2955 break;
2956 }
5c438135 2957 ret = cma_set_qkey(id_priv, rep->qkey);
d2ca39f2
YE
2958 if (ret) {
2959 event.event = RDMA_CM_EVENT_ADDR_ERROR;
5c438135 2960 event.status = ret;
628e5f6d
SH
2961 break;
2962 }
2963 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
2964 id_priv->id.route.path_rec,
2965 &event.param.ud.ah_attr);
2966 event.param.ud.qp_num = rep->qpn;
2967 event.param.ud.qkey = rep->qkey;
2968 event.event = RDMA_CM_EVENT_ESTABLISHED;
2969 event.status = 0;
2970 break;
2971 default:
468f2239 2972 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
628e5f6d
SH
2973 ib_event->event);
2974 goto out;
2975 }
2976
2977 ret = id_priv->id.event_handler(&id_priv->id, &event);
2978 if (ret) {
2979 /* Destroy the CM ID by returning a non-zero value. */
2980 id_priv->cm_id.ib = NULL;
550e5ca7 2981 cma_exch(id_priv, RDMA_CM_DESTROYING);
de910bd9 2982 mutex_unlock(&id_priv->handler_mutex);
628e5f6d
SH
2983 rdma_destroy_id(&id_priv->id);
2984 return ret;
2985 }
2986out:
de910bd9 2987 mutex_unlock(&id_priv->handler_mutex);
628e5f6d
SH
2988 return ret;
2989}
2990
2991static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
2992 struct rdma_conn_param *conn_param)
2993{
2994 struct ib_cm_sidr_req_param req;
0c9361fc 2995 struct ib_cm_id *id;
e511d1ae 2996 void *private_data;
e8160e15 2997 int offset, ret;
628e5f6d 2998
e511d1ae 2999 memset(&req, 0, sizeof req);
e8160e15
SH
3000 offset = cma_user_data_offset(id_priv);
3001 req.private_data_len = offset + conn_param->private_data_len;
04ded167
SH
3002 if (req.private_data_len < conn_param->private_data_len)
3003 return -EINVAL;
3004
e8160e15 3005 if (req.private_data_len) {
e511d1ae
SH
3006 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
3007 if (!private_data)
e8160e15
SH
3008 return -ENOMEM;
3009 } else {
e511d1ae 3010 private_data = NULL;
e8160e15 3011 }
628e5f6d
SH
3012
3013 if (conn_param->private_data && conn_param->private_data_len)
e511d1ae
SH
3014 memcpy(private_data + offset, conn_param->private_data,
3015 conn_param->private_data_len);
628e5f6d 3016
e511d1ae
SH
3017 if (private_data) {
3018 ret = cma_format_hdr(private_data, id_priv);
e8160e15
SH
3019 if (ret)
3020 goto out;
e511d1ae 3021 req.private_data = private_data;
e8160e15 3022 }
628e5f6d 3023
0c9361fc
JM
3024 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
3025 id_priv);
3026 if (IS_ERR(id)) {
3027 ret = PTR_ERR(id);
628e5f6d
SH
3028 goto out;
3029 }
0c9361fc 3030 id_priv->cm_id.ib = id;
628e5f6d 3031
f4753834 3032 req.path = id_priv->id.route.path_rec;
cf53936f 3033 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
628e5f6d
SH
3034 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
3035 req.max_cm_retries = CMA_MAX_CM_RETRIES;
3036
3037 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
3038 if (ret) {
3039 ib_destroy_cm_id(id_priv->cm_id.ib);
3040 id_priv->cm_id.ib = NULL;
3041 }
3042out:
e511d1ae 3043 kfree(private_data);
628e5f6d
SH
3044 return ret;
3045}
3046
e51060f0
SH
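/*
 * Send an IB CM REQ for a connected QP.  The RDMA CM header built by
 * cma_format_hdr() is prepended to the user's private data so the passive
 * side can recover the IP addresses and port from the REQ.
 */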
3047static int cma_connect_ib(struct rdma_id_private *id_priv,
3048 struct rdma_conn_param *conn_param)
3049{
3050 struct ib_cm_req_param req;
3051 struct rdma_route *route;
3052 void *private_data;
0c9361fc 3053 struct ib_cm_id *id;
e51060f0
SH
3054 int offset, ret;
3055
3056 memset(&req, 0, sizeof req);
e8160e15 3057 offset = cma_user_data_offset(id_priv);
e51060f0 3058 req.private_data_len = offset + conn_param->private_data_len;
04ded167
SH
3059 if (req.private_data_len < conn_param->private_data_len)
3060 return -EINVAL;
3061
e8160e15
SH
3062 if (req.private_data_len) {
3063 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
3064 if (!private_data)
3065 return -ENOMEM;
3066 } else {
3067 private_data = NULL;
3068 }
e51060f0
SH
3069
3070 if (conn_param->private_data && conn_param->private_data_len)
3071 memcpy(private_data + offset, conn_param->private_data,
3072 conn_param->private_data_len);
3073
0c9361fc
JM
3074 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
3075 if (IS_ERR(id)) {
3076 ret = PTR_ERR(id);
e51060f0
SH
3077 goto out;
3078 }
0c9361fc 3079 id_priv->cm_id.ib = id;
e51060f0
SH
3080
3081 route = &id_priv->id.route;
e8160e15
SH
3082 if (private_data) {
3083 ret = cma_format_hdr(private_data, id_priv);
3084 if (ret)
3085 goto out;
3086 req.private_data = private_data;
3087 }
e51060f0
SH
3088
3089 req.primary_path = &route->path_rec[0];
3090 if (route->num_paths == 2)
3091 req.alternate_path = &route->path_rec[1];
3092
cf53936f 3093 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
e51060f0 3094 req.qp_num = id_priv->qp_num;
18c441a6 3095 req.qp_type = id_priv->id.qp_type;
e51060f0
SH
3096 req.starting_psn = id_priv->seq_num;
3097 req.responder_resources = conn_param->responder_resources;
3098 req.initiator_depth = conn_param->initiator_depth;
3099 req.flow_control = conn_param->flow_control;
4ede178a
SH
3100 req.retry_count = min_t(u8, 7, conn_param->retry_count);
3101 req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
e51060f0
SH
3102 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
3103 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
3104 req.max_cm_retries = CMA_MAX_CM_RETRIES;
3105 req.srq = id_priv->srq ? 1 : 0;
3106
3107 ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
3108out:
0c9361fc
JM
3109 if (ret && !IS_ERR(id)) {
3110 ib_destroy_cm_id(id);
675a027c
KK
3111 id_priv->cm_id.ib = NULL;
3112 }
3113
e51060f0
SH
3114 kfree(private_data);
3115 return ret;
3116}
3117
07ebafba
TT
3118static int cma_connect_iw(struct rdma_id_private *id_priv,
3119 struct rdma_conn_param *conn_param)
3120{
3121 struct iw_cm_id *cm_id;
07ebafba
TT
3122 int ret;
3123 struct iw_cm_conn_param iw_param;
3124
3125 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
0c9361fc
JM
3126 if (IS_ERR(cm_id))
3127 return PTR_ERR(cm_id);
07ebafba 3128
68cdba06 3129 cm_id->tos = id_priv->tos;
07ebafba
TT
3130 id_priv->cm_id.iw = cm_id;
3131
24d44a39
SW
3132 memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
3133 rdma_addr_size(cma_src_addr(id_priv)));
3134 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
3135 rdma_addr_size(cma_dst_addr(id_priv)));
07ebafba 3136
5851bb89 3137 ret = cma_modify_qp_rtr(id_priv, conn_param);
675a027c
KK
3138 if (ret)
3139 goto out;
07ebafba 3140
f45ee80e
HS
3141 if (conn_param) {
3142 iw_param.ord = conn_param->initiator_depth;
3143 iw_param.ird = conn_param->responder_resources;
3144 iw_param.private_data = conn_param->private_data;
3145 iw_param.private_data_len = conn_param->private_data_len;
3146 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
3147 } else {
3148 memset(&iw_param, 0, sizeof iw_param);
07ebafba 3149 iw_param.qpn = id_priv->qp_num;
f45ee80e 3150 }
07ebafba
TT
3151 ret = iw_cm_connect(cm_id, &iw_param);
3152out:
0c9361fc 3153 if (ret) {
675a027c
KK
3154 iw_destroy_cm_id(cm_id);
3155 id_priv->cm_id.iw = NULL;
3156 }
07ebafba
TT
3157 return ret;
3158}
3159
e51060f0
SH
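/*
 * Active-side connect.  The id must already be in ROUTE_RESOLVED (i.e.
 * rdma_resolve_addr() and rdma_resolve_route() have completed).  UD QPs on
 * IB use a SIDR request, connected IB QPs a full CM REQ, and iWARP ids an
 * iw_cm connect.
 */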
3160int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
3161{
3162 struct rdma_id_private *id_priv;
3163 int ret;
3164
3165 id_priv = container_of(id, struct rdma_id_private, id);
550e5ca7 3166 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
e51060f0
SH
3167 return -EINVAL;
3168
3169 if (!id->qp) {
3170 id_priv->qp_num = conn_param->qp_num;
e51060f0
SH
3171 id_priv->srq = conn_param->srq;
3172 }
3173
72219cea 3174 if (rdma_cap_ib_cm(id->device, id->port_num)) {
b26f9b99 3175 if (id->qp_type == IB_QPT_UD)
628e5f6d
SH
3176 ret = cma_resolve_ib_udp(id_priv, conn_param);
3177 else
3178 ret = cma_connect_ib(id_priv, conn_param);
04215330 3179 } else if (rdma_cap_iw_cm(id->device, id->port_num))
07ebafba 3180 ret = cma_connect_iw(id_priv, conn_param);
21655afc 3181 else
e51060f0 3182 ret = -ENOSYS;
e51060f0
SH
3183 if (ret)
3184 goto err;
3185
3186 return 0;
3187err:
550e5ca7 3188 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
e51060f0
SH
3189 return ret;
3190}
3191EXPORT_SYMBOL(rdma_connect);
3192
3193static int cma_accept_ib(struct rdma_id_private *id_priv,
3194 struct rdma_conn_param *conn_param)
3195{
3196 struct ib_cm_rep_param rep;
5851bb89 3197 int ret;
0fe313b0 3198
5851bb89
SH
3199 ret = cma_modify_qp_rtr(id_priv, conn_param);
3200 if (ret)
3201 goto out;
0fe313b0 3202
5851bb89
SH
3203 ret = cma_modify_qp_rts(id_priv, conn_param);
3204 if (ret)
3205 goto out;
e51060f0
SH
3206
3207 memset(&rep, 0, sizeof rep);
3208 rep.qp_num = id_priv->qp_num;
3209 rep.starting_psn = id_priv->seq_num;
3210 rep.private_data = conn_param->private_data;
3211 rep.private_data_len = conn_param->private_data_len;
3212 rep.responder_resources = conn_param->responder_resources;
3213 rep.initiator_depth = conn_param->initiator_depth;
e51060f0
SH
3214 rep.failover_accepted = 0;
3215 rep.flow_control = conn_param->flow_control;
4ede178a 3216 rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
e51060f0
SH
3217 rep.srq = id_priv->srq ? 1 : 0;
3218
0fe313b0
SH
3219 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
3220out:
3221 return ret;
e51060f0
SH
3222}
3223
07ebafba
TT
3224static int cma_accept_iw(struct rdma_id_private *id_priv,
3225 struct rdma_conn_param *conn_param)
3226{
3227 struct iw_cm_conn_param iw_param;
3228 int ret;
3229
5851bb89 3230 ret = cma_modify_qp_rtr(id_priv, conn_param);
07ebafba
TT
3231 if (ret)
3232 return ret;
3233
3234 iw_param.ord = conn_param->initiator_depth;
3235 iw_param.ird = conn_param->responder_resources;
3236 iw_param.private_data = conn_param->private_data;
3237 iw_param.private_data_len = conn_param->private_data_len;
3238 if (id_priv->id.qp) {
3239 iw_param.qpn = id_priv->qp_num;
3240 } else
3241 iw_param.qpn = conn_param->qp_num;
3242
3243 return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
3244}
3245
628e5f6d 3246static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
5c438135 3247 enum ib_cm_sidr_status status, u32 qkey,
628e5f6d
SH
3248 const void *private_data, int private_data_len)
3249{
3250 struct ib_cm_sidr_rep_param rep;
d2ca39f2 3251 int ret;
628e5f6d
SH
3252
3253 memset(&rep, 0, sizeof rep);
3254 rep.status = status;
3255 if (status == IB_SIDR_SUCCESS) {
5c438135 3256 ret = cma_set_qkey(id_priv, qkey);
d2ca39f2
YE
3257 if (ret)
3258 return ret;
628e5f6d 3259 rep.qp_num = id_priv->qp_num;
c8f6a362 3260 rep.qkey = id_priv->qkey;
628e5f6d
SH
3261 }
3262 rep.private_data = private_data;
3263 rep.private_data_len = private_data_len;
3264
3265 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
3266}
3267
e51060f0
SH
3268int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
3269{
3270 struct rdma_id_private *id_priv;
3271 int ret;
3272
3273 id_priv = container_of(id, struct rdma_id_private, id);
83e9502d
NM
3274
3275 id_priv->owner = task_pid_nr(current);
3276
550e5ca7 3277 if (!cma_comp(id_priv, RDMA_CM_CONNECT))
e51060f0
SH
3278 return -EINVAL;
3279
3280 if (!id->qp && conn_param) {
3281 id_priv->qp_num = conn_param->qp_num;
e51060f0
SH
3282 id_priv->srq = conn_param->srq;
3283 }
3284
72219cea 3285 if (rdma_cap_ib_cm(id->device, id->port_num)) {
f45ee80e
HS
3286 if (id->qp_type == IB_QPT_UD) {
3287 if (conn_param)
3288 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
5c438135 3289 conn_param->qkey,
f45ee80e
HS
3290 conn_param->private_data,
3291 conn_param->private_data_len);
3292 else
3293 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
5c438135 3294 0, NULL, 0);
f45ee80e
HS
3295 } else {
3296 if (conn_param)
3297 ret = cma_accept_ib(id_priv, conn_param);
3298 else
3299 ret = cma_rep_recv(id_priv);
3300 }
04215330 3301 } else if (rdma_cap_iw_cm(id->device, id->port_num))
07ebafba 3302 ret = cma_accept_iw(id_priv, conn_param);
21655afc 3303 else
e51060f0 3304 ret = -ENOSYS;
e51060f0
SH
3305
3306 if (ret)
3307 goto reject;
3308
3309 return 0;
3310reject:
c5483388 3311 cma_modify_qp_err(id_priv);
e51060f0
SH
3312 rdma_reject(id, NULL, 0);
3313 return ret;
3314}
3315EXPORT_SYMBOL(rdma_accept);
3316
0fe313b0
SH
3317int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
3318{
3319 struct rdma_id_private *id_priv;
3320 int ret;
3321
3322 id_priv = container_of(id, struct rdma_id_private, id);
0c9361fc 3323 if (!id_priv->cm_id.ib)
0fe313b0
SH
3324 return -EINVAL;
3325
3326 switch (id->device->node_type) {
3327 case RDMA_NODE_IB_CA:
3328 ret = ib_cm_notify(id_priv->cm_id.ib, event);
3329 break;
3330 default:
3331 ret = 0;
3332 break;
3333 }
3334 return ret;
3335}
3336EXPORT_SYMBOL(rdma_notify);
3337
e51060f0
SH
3338int rdma_reject(struct rdma_cm_id *id, const void *private_data,
3339 u8 private_data_len)
3340{
3341 struct rdma_id_private *id_priv;
3342 int ret;
3343
3344 id_priv = container_of(id, struct rdma_id_private, id);
0c9361fc 3345 if (!id_priv->cm_id.ib)
e51060f0
SH
3346 return -EINVAL;
3347
72219cea 3348 if (rdma_cap_ib_cm(id->device, id->port_num)) {
b26f9b99 3349 if (id->qp_type == IB_QPT_UD)
5c438135 3350 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
628e5f6d
SH
3351 private_data, private_data_len);
3352 else
3353 ret = ib_send_cm_rej(id_priv->cm_id.ib,
3354 IB_CM_REJ_CONSUMER_DEFINED, NULL,
3355 0, private_data, private_data_len);
04215330 3356 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
07ebafba
TT
3357 ret = iw_cm_reject(id_priv->cm_id.iw,
3358 private_data, private_data_len);
21655afc 3359 } else
e51060f0 3360 ret = -ENOSYS;
21655afc 3361
e51060f0
SH
3362 return ret;
3363}
3364EXPORT_SYMBOL(rdma_reject);
3365
3366int rdma_disconnect(struct rdma_cm_id *id)
3367{
3368 struct rdma_id_private *id_priv;
3369 int ret;
3370
3371 id_priv = container_of(id, struct rdma_id_private, id);
0c9361fc 3372 if (!id_priv->cm_id.ib)
e51060f0
SH
3373 return -EINVAL;
3374
72219cea 3375 if (rdma_cap_ib_cm(id->device, id->port_num)) {
c5483388 3376 ret = cma_modify_qp_err(id_priv);
07ebafba
TT
3377 if (ret)
3378 goto out;
e51060f0
SH
3379 /* Initiate or respond to a disconnect. */
3380 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
3381 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
04215330 3382 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
07ebafba 3383 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
21655afc 3384 } else
07ebafba 3385 ret = -EINVAL;
21655afc 3386
e51060f0
SH
3387out:
3388 return ret;
3389}
3390EXPORT_SYMBOL(rdma_disconnect);
3391
c8f6a362
SH
3392static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
3393{
3394 struct rdma_id_private *id_priv;
3395 struct cma_multicast *mc = multicast->context;
3396 struct rdma_cm_event event;
3397 int ret;
3398
3399 id_priv = mc->id_priv;
550e5ca7
NM
3400 if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
3401 cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
8aa08602 3402 return 0;
c8f6a362 3403
5c438135
SH
3404 if (!status)
3405 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
c5483388 3406 mutex_lock(&id_priv->qp_mutex);
c8f6a362
SH
3407 if (!status && id_priv->id.qp)
3408 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
46ea5061 3409 be16_to_cpu(multicast->rec.mlid));
c5483388 3410 mutex_unlock(&id_priv->qp_mutex);
c8f6a362
SH
3411
3412 memset(&event, 0, sizeof event);
3413 event.status = status;
3414 event.param.ud.private_data = mc->context;
3415 if (!status) {
3416 event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
3417 ib_init_ah_from_mcmember(id_priv->id.device,
3418 id_priv->id.port_num, &multicast->rec,
3419 &event.param.ud.ah_attr);
3420 event.param.ud.qp_num = 0xFFFFFF;
3421 event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
3422 } else
3423 event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
3424
3425 ret = id_priv->id.event_handler(&id_priv->id, &event);
3426 if (ret) {
550e5ca7 3427 cma_exch(id_priv, RDMA_CM_DESTROYING);
de910bd9 3428 mutex_unlock(&id_priv->handler_mutex);
c8f6a362
SH
3429 rdma_destroy_id(&id_priv->id);
3430 return 0;
3431 }
8aa08602 3432
de910bd9 3433 mutex_unlock(&id_priv->handler_mutex);
c8f6a362
SH
3434 return 0;
3435}
3436
3437static void cma_set_mgid(struct rdma_id_private *id_priv,
3438 struct sockaddr *addr, union ib_gid *mgid)
3439{
3440 unsigned char mc_map[MAX_ADDR_LEN];
3441 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
3442 struct sockaddr_in *sin = (struct sockaddr_in *) addr;
3443 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;
3444
3445 if (cma_any_addr(addr)) {
3446 memset(mgid, 0, sizeof *mgid);
3447 } else if ((addr->sa_family == AF_INET6) &&
1c9b2819 3448 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
c8f6a362
SH
3449 0xFF10A01B)) {
3450 /* IPv6 address is an SA assigned MGID. */
3451 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
5bc2b7b3
SH
3452 } else if (addr->sa_family == AF_IB) {
3453 memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
e2e62697
JG
3454 } else if ((addr->sa_family == AF_INET6)) {
3455 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
3456 if (id_priv->id.ps == RDMA_PS_UDP)
3457 mc_map[7] = 0x01; /* Use RDMA CM signature */
3458 *mgid = *(union ib_gid *) (mc_map + 4);
c8f6a362 3459 } else {
a9e527e3 3460 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
c8f6a362
SH
3461 if (id_priv->id.ps == RDMA_PS_UDP)
3462 mc_map[7] = 0x01; /* Use RDMA CM signature */
c8f6a362
SH
3463 *mgid = *(union ib_gid *) (mc_map + 4);
3464 }
3465}
3466
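/*
 * Join an IB multicast group through the SA: derive the MGID from the
 * socket address, fill in the member record (qkey, port GID, pkey, join
 * state) and call ib_sa_join_multicast(), with cma_ib_mc_handler()
 * reporting the outcome.
 */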
3467static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
3468 struct cma_multicast *mc)
3469{
3470 struct ib_sa_mcmember_rec rec;
3471 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
3472 ib_sa_comp_mask comp_mask;
3473 int ret;
3474
3475 ib_addr_get_mgid(dev_addr, &rec.mgid);
3476 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
3477 &rec.mgid, &rec);
3478 if (ret)
3479 return ret;
3480
5bc2b7b3
SH
3481 ret = cma_set_qkey(id_priv, 0);
3482 if (ret)
3483 return ret;
3484
3f446754 3485 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
5bc2b7b3 3486 rec.qkey = cpu_to_be32(id_priv->qkey);
6f8372b6 3487 rdma_addr_get_sgid(dev_addr, &rec.port_gid);
c8f6a362
SH
3488 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
3489 rec.join_state = 1;
3490
3491 comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
3492 IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
3493 IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
3494 IB_SA_MCMEMBER_REC_FLOW_LABEL |
3495 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
3496
84adeee9
YE
3497 if (id_priv->id.ps == RDMA_PS_IPOIB)
3498 comp_mask |= IB_SA_MCMEMBER_REC_RATE |
2a22fb8c
DB
3499 IB_SA_MCMEMBER_REC_RATE_SELECTOR |
3500 IB_SA_MCMEMBER_REC_MTU_SELECTOR |
3501 IB_SA_MCMEMBER_REC_MTU |
3502 IB_SA_MCMEMBER_REC_HOP_LIMIT;
84adeee9 3503
3504 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
3505 id_priv->id.port_num, &rec,
3506 comp_mask, GFP_KERNEL,
3507 cma_ib_mc_handler, mc);
8c6ffba0 3508 return PTR_ERR_OR_ZERO(mc->multicast.ib);
3509}
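/*
 * Note on the request built above: join_state = 1 asks for full
 * membership, and comp_mask marks which MCMemberRecord components are
 * caller-supplied in the SubnAdmSet(MCMemberRecord) that
 * ib_sa_join_multicast() sends; components left out of the mask (the MLID,
 * for instance) are assigned by the SA and show up in the record passed
 * back to cma_ib_mc_handler().
 */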
3510
3511static void iboe_mcast_work_handler(struct work_struct *work)
3512{
3513 struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
3514 struct cma_multicast *mc = mw->mc;
3515 struct ib_sa_multicast *m = mc->multicast.ib;
3516
3517 mc->multicast.ib->context = mc;
3518 cma_ib_mc_handler(0, m);
3519 kref_put(&mc->mcref, release_mc);
3520 kfree(mw);
3521}
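/*
 * RoCE has no SA to answer a join, so cma_iboe_join_multicast() below
 * fills in the ib_sa_multicast record itself and queues this work item;
 * the join then "completes" from cma_wq context, which keeps event
 * delivery on the same asynchronous path as the IB case instead of calling
 * back into the ULP from the joiner's own call chain.
 */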
3522
3523static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
3524{
3525 struct sockaddr_in *sin = (struct sockaddr_in *)addr;
3526 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
3527
3528 if (cma_any_addr(addr)) {
3529 memset(mgid, 0, sizeof *mgid);
3530 } else if (addr->sa_family == AF_INET6) {
3531 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
3532 } else {
3533 mgid->raw[0] = 0xff;
3534 mgid->raw[1] = 0x0e;
3535 mgid->raw[2] = 0;
3536 mgid->raw[3] = 0;
3537 mgid->raw[4] = 0;
3538 mgid->raw[5] = 0;
3539 mgid->raw[6] = 0;
3540 mgid->raw[7] = 0;
3541 mgid->raw[8] = 0;
3542 mgid->raw[9] = 0;
3543 mgid->raw[10] = 0xff;
3544 mgid->raw[11] = 0xff;
3545 *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
3546 }
3547}
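/*
 * Worked example: an IPv6 group address is used as the MGID verbatim,
 * while an IPv4 group such as 239.1.2.3 becomes the IPv4-mapped style GID
 * ff0e:0000:0000:0000:0000:ffff:ef01:0203 (raw[0..1] = ff:0e,
 * raw[10..11] = ff:ff, raw[12..15] = the dotted quad).
 */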
3548
3549static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
3550 struct cma_multicast *mc)
3551{
3552 struct iboe_mcast_work *work;
3553 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
3554 int err;
3555 struct sockaddr *addr = (struct sockaddr *)&mc->addr;
3556 struct net_device *ndev = NULL;
3557
3558 if (cma_zero_addr((struct sockaddr *)&mc->addr))
3559 return -EINVAL;
3560
3561 work = kzalloc(sizeof *work, GFP_KERNEL);
3562 if (!work)
3563 return -ENOMEM;
3564
3565 mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
3566 if (!mc->multicast.ib) {
3567 err = -ENOMEM;
3568 goto out1;
3569 }
3570
3571 cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);
3572
3573 mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
3574 if (id_priv->id.ps == RDMA_PS_UDP)
3575 mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
3576
3577 if (dev_addr->bound_dev_if)
3578 ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
3579 if (!ndev) {
3580 err = -ENODEV;
3581 goto out2;
3582 }
3583 mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
3584 mc->multicast.ib->rec.hop_limit = 1;
3585 mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
3586 dev_put(ndev);
3587 if (!mc->multicast.ib->rec.mtu) {
3588 err = -EINVAL;
3589 goto out2;
3590 }
3591 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
3592 &mc->multicast.ib->rec.port_gid);
3593 work->id = id_priv;
3594 work->mc = mc;
3595 INIT_WORK(&work->work, iboe_mcast_work_handler);
3596 kref_get(&mc->mcref);
3597 queue_work(cma_wq, &work->work);
3598
3599 return 0;
3600
3601out2:
3602 kfree(mc->multicast.ib);
3603out1:
3604 kfree(work);
3605 return err;
3606}
3607
3608int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
3609 void *context)
3610{
3611 struct rdma_id_private *id_priv;
3612 struct cma_multicast *mc;
3613 int ret;
3614
3615 id_priv = container_of(id, struct rdma_id_private, id);
3616 if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
3617 !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
3618 return -EINVAL;
3619
3620 mc = kmalloc(sizeof *mc, GFP_KERNEL);
3621 if (!mc)
3622 return -ENOMEM;
3623
ef560861 3624 memcpy(&mc->addr, addr, rdma_addr_size(addr));
3625 mc->context = context;
3626 mc->id_priv = id_priv;
3627
3628 spin_lock(&id_priv->lock);
3629 list_add(&mc->list, &id_priv->mc_list);
3630 spin_unlock(&id_priv->lock);
3631
5d9fb044 3632 if (rdma_protocol_roce(id->device, id->port_num)) {
3633 kref_init(&mc->mcref);
3634 ret = cma_iboe_join_multicast(id_priv, mc);
a31ad3b0 3635 } else if (rdma_cap_ib_mcast(id->device, id->port_num))
3636 ret = cma_join_ib_multicast(id_priv, mc);
3637 else
c8f6a362 3638 ret = -ENOSYS;
3639
3640 if (ret) {
3641 spin_lock_irq(&id_priv->lock);
3642 list_del(&mc->list);
3643 spin_unlock_irq(&id_priv->lock);
3644 kfree(mc);
3645 }
3646 return ret;
3647}
3648EXPORT_SYMBOL(rdma_join_multicast);
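/*
 * Minimal usage sketch (not part of cma.c; error handling, teardown and
 * the surrounding switch are elided, and mc_addr/ctx are hypothetical):
 * the id must already be bound or address-resolved, so a typical UD
 * consumer resolves the group address first and joins from the
 * ADDR_RESOLVED event:
 *
 *	rdma_resolve_addr(id, NULL, (struct sockaddr *)&mc_addr, 2000);
 *	...
 *	case RDMA_CM_EVENT_ADDR_RESOLVED:
 *		ret = rdma_join_multicast(id, (struct sockaddr *)&mc_addr, ctx);
 *		break;
 *	case RDMA_CM_EVENT_MULTICAST_JOIN:
 *		... use event->param.ud as in the handler sketch above ...
 *		break;
 */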
3649
3650void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
3651{
3652 struct rdma_id_private *id_priv;
3653 struct cma_multicast *mc;
3654
3655 id_priv = container_of(id, struct rdma_id_private, id);
3656 spin_lock_irq(&id_priv->lock);
3657 list_for_each_entry(mc, &id_priv->mc_list, list) {
ef560861 3658 if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
3659 list_del(&mc->list);
3660 spin_unlock_irq(&id_priv->lock);
3661
3662 if (id->qp)
3663 ib_detach_mcast(id->qp,
3664 &mc->multicast.ib->rec.mgid,
46ea5061 3665 be16_to_cpu(mc->multicast.ib->rec.mlid));
3666
3667 BUG_ON(id_priv->cma_dev->device != id->device);
3668
a31ad3b0 3669 if (rdma_cap_ib_mcast(id->device, id->port_num)) {
3670 ib_sa_free_multicast(mc->multicast.ib);
3671 kfree(mc);
5d9fb044 3672 } else if (rdma_protocol_roce(id->device, id->port_num))
3673 kref_put(&mc->mcref, release_mc);
3674
3675 return;
3676 }
3677 }
3678 spin_unlock_irq(&id_priv->lock);
3679}
3680EXPORT_SYMBOL(rdma_leave_multicast);
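/*
 * Usage note: the lookup above memcmp()s rdma_addr_size() bytes, so the
 * sockaddr handed to rdma_leave_multicast() should be the very same
 * address that was passed to rdma_join_multicast(), e.g.
 *
 *	rdma_leave_multicast(id, (struct sockaddr *)&mc_addr);
 */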
3681
3682static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
3683{
3684 struct rdma_dev_addr *dev_addr;
3685 struct cma_ndev_work *work;
3686
3687 dev_addr = &id_priv->id.route.addr.dev_addr;
3688
6266ed6e 3689 if ((dev_addr->bound_dev_if == ndev->ifindex) &&
3690 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
3691 printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
3692 ndev->name, &id_priv->id);
3693 work = kzalloc(sizeof *work, GFP_KERNEL);
3694 if (!work)
3695 return -ENOMEM;
3696
3697 INIT_WORK(&work->work, cma_ndev_work_handler);
3698 work->id = id_priv;
3699 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
3700 atomic_inc(&id_priv->refcount);
3701 queue_work(cma_wq, &work->work);
3702 }
3703
3704 return 0;
3705}
3706
3707static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
351638e7 3708 void *ptr)
dd5bdff8 3709{
351638e7 3710 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
3711 struct cma_device *cma_dev;
3712 struct rdma_id_private *id_priv;
3713 int ret = NOTIFY_DONE;
3714
3715 if (dev_net(ndev) != &init_net)
3716 return NOTIFY_DONE;
3717
3718 if (event != NETDEV_BONDING_FAILOVER)
3719 return NOTIFY_DONE;
3720
3721 if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
3722 return NOTIFY_DONE;
3723
3724 mutex_lock(&lock);
3725 list_for_each_entry(cma_dev, &dev_list, list)
3726 list_for_each_entry(id_priv, &cma_dev->id_list, list) {
3727 ret = cma_netdev_change(ndev, id_priv);
3728 if (ret)
3729 goto out;
3730 }
3731
3732out:
3733 mutex_unlock(&lock);
3734 return ret;
3735}
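/*
 * This notifier only reacts to NETDEV_BONDING_FAILOVER on bonding masters:
 * cma_netdev_change() compares each id's cached source MAC with the newly
 * active slave's MAC and, on a mismatch, queues an
 * RDMA_CM_EVENT_ADDR_CHANGE so the ULP can re-resolve its address and
 * route over the surviving port.
 */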
3736
3737static struct notifier_block cma_nb = {
3738 .notifier_call = cma_netdev_callback
3739};
3740
3741static void cma_add_one(struct ib_device *device)
3742{
3743 struct cma_device *cma_dev;
3744 struct rdma_id_private *id_priv;
3745
3746 cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
3747 if (!cma_dev)
3748 return;
3749
3750 cma_dev->device = device;
3751
3752 init_completion(&cma_dev->comp);
3753 atomic_set(&cma_dev->refcount, 1);
3754 INIT_LIST_HEAD(&cma_dev->id_list);
3755 ib_set_client_data(device, &cma_client, cma_dev);
3756
3757 mutex_lock(&lock);
3758 list_add_tail(&cma_dev->list, &dev_list);
3759 list_for_each_entry(id_priv, &listen_any_list, list)
3760 cma_listen_on_dev(id_priv, cma_dev);
3761 mutex_unlock(&lock);
3762}
3763
3764static int cma_remove_id_dev(struct rdma_id_private *id_priv)
3765{
a1b1b61f 3766 struct rdma_cm_event event;
550e5ca7 3767 enum rdma_cm_state state;
de910bd9 3768 int ret = 0;
3769
3770 /* Record that we want to remove the device */
3771 state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
3772 if (state == RDMA_CM_DESTROYING)
3773 return 0;
3774
3775 cma_cancel_operation(id_priv, state);
de910bd9 3776 mutex_lock(&id_priv->handler_mutex);
3777
3778 /* Check for destruction from another callback. */
550e5ca7 3779 if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
de910bd9 3780 goto out;
e51060f0 3781
3782 memset(&event, 0, sizeof event);
3783 event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
3784 ret = id_priv->id.event_handler(&id_priv->id, &event);
3785out:
3786 mutex_unlock(&id_priv->handler_mutex);
3787 return ret;
3788}
3789
3790static void cma_process_remove(struct cma_device *cma_dev)
3791{
3792 struct rdma_id_private *id_priv;
3793 int ret;
3794
3795 mutex_lock(&lock);
3796 while (!list_empty(&cma_dev->id_list)) {
3797 id_priv = list_entry(cma_dev->id_list.next,
3798 struct rdma_id_private, list);
3799
d02d1f53 3800 list_del(&id_priv->listen_list);
94de178a 3801 list_del_init(&id_priv->list);
3802 atomic_inc(&id_priv->refcount);
3803 mutex_unlock(&lock);
3804
d02d1f53 3805 ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
3806 cma_deref_id(id_priv);
3807 if (ret)
3808 rdma_destroy_id(&id_priv->id);
3809
3810 mutex_lock(&lock);
3811 }
3812 mutex_unlock(&lock);
3813
3814 cma_deref_dev(cma_dev);
3815 wait_for_completion(&cma_dev->comp);
3816}
3817
7c1eb45a 3818static void cma_remove_one(struct ib_device *device, void *client_data)
e51060f0 3819{
7c1eb45a 3820 struct cma_device *cma_dev = client_data;
e51060f0 3821
3822 if (!cma_dev)
3823 return;
3824
3825 mutex_lock(&lock);
3826 list_del(&cma_dev->list);
3827 mutex_unlock(&lock);
3828
3829 cma_process_remove(cma_dev);
3830 kfree(cma_dev);
3831}
3832
3833static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
3834{
3835 struct nlmsghdr *nlh;
3836 struct rdma_cm_id_stats *id_stats;
3837 struct rdma_id_private *id_priv;
3838 struct rdma_cm_id *id = NULL;
3839 struct cma_device *cma_dev;
3840 int i_dev = 0, i_id = 0;
3841
3842 /*
3843 * We export all of the IDs as a sequence of messages. Each
3844 * ID gets its own netlink message.
3845 */
3846 mutex_lock(&lock);
3847
3848 list_for_each_entry(cma_dev, &dev_list, list) {
3849 if (i_dev < cb->args[0]) {
3850 i_dev++;
3851 continue;
3852 }
3853
3854 i_id = 0;
3855 list_for_each_entry(id_priv, &cma_dev->id_list, list) {
3856 if (i_id < cb->args[1]) {
3857 i_id++;
3858 continue;
3859 }
3860
3861 id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
3862 sizeof *id_stats, RDMA_NL_RDMA_CM,
3863 RDMA_NL_RDMA_CM_ID_STATS,
3864 NLM_F_MULTI);
3865 if (!id_stats)
3866 goto out;
3867
3868 memset(id_stats, 0, sizeof *id_stats);
3869 id = &id_priv->id;
3870 id_stats->node_type = id->route.addr.dev_addr.dev_type;
3871 id_stats->port_num = id->port_num;
3872 id_stats->bound_dev_if =
3873 id->route.addr.dev_addr.bound_dev_if;
3874
3875 if (ibnl_put_attr(skb, nlh,
3876 rdma_addr_size(cma_src_addr(id_priv)),
3877 cma_src_addr(id_priv),
3878 RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
3879 goto out;
3880 if (ibnl_put_attr(skb, nlh,
3881 rdma_addr_size(cma_src_addr(id_priv)),
3882 cma_dst_addr(id_priv),
3883 RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
3884 goto out;
753f618a 3885
83e9502d 3886 id_stats->pid = id_priv->owner;
3887 id_stats->port_space = id->ps;
3888 id_stats->cm_state = id_priv->state;
3889 id_stats->qp_num = id_priv->qp_num;
3890 id_stats->qp_type = id->qp_type;
3891
3892 i_id++;
3893 }
3894
3895 cb->args[1] = 0;
3896 i_dev++;
3897 }
3898
3899out:
3900 mutex_unlock(&lock);
3901 cb->args[0] = i_dev;
3902 cb->args[1] = i_id;
3903
3904 return skb->len;
3905}
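/*
 * This is a standard netlink dump callback: cb->args[0] and cb->args[1]
 * persist between invocations and act as a (device, id) resume cursor, so
 * a dump that fills one skb continues where it left off on the next call
 * instead of restarting from the first cma_dev.
 */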
3906
3907static const struct ibnl_client_cbs cma_cb_table[] = {
3908 [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
3909 .module = THIS_MODULE },
3910};
3911
716abb1f 3912static int __init cma_init(void)
e51060f0 3913{
5d7220e8 3914 int ret;
227b60f5 3915
c7f743a6 3916 cma_wq = create_singlethread_workqueue("rdma_cm");
3917 if (!cma_wq)
3918 return -ENOMEM;
3919
c1a0b23b 3920 ib_sa_register_client(&sa_client);
7a118df3 3921 rdma_addr_register_client(&addr_client);
dd5bdff8 3922 register_netdevice_notifier(&cma_nb);
c1a0b23b 3923
3924 ret = ib_register_client(&cma_client);
3925 if (ret)
3926 goto err;
3927
3928 if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
3929 printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");
3930
3931 return 0;
3932
3933err:
dd5bdff8 3934 unregister_netdevice_notifier(&cma_nb);
7a118df3 3935 rdma_addr_unregister_client(&addr_client);
c1a0b23b 3936 ib_sa_unregister_client(&sa_client);
3937 destroy_workqueue(cma_wq);
3938 return ret;
3939}
3940
716abb1f 3941static void __exit cma_cleanup(void)
e51060f0 3942{
753f618a 3943 ibnl_remove_client(RDMA_NL_RDMA_CM);
e51060f0 3944 ib_unregister_client(&cma_client);
dd5bdff8 3945 unregister_netdevice_notifier(&cma_nb);
7a118df3 3946 rdma_addr_unregister_client(&addr_client);
c1a0b23b 3947 ib_sa_unregister_client(&sa_client);
e51060f0 3948 destroy_workqueue(cma_wq);
e51060f0 3949 idr_destroy(&tcp_ps);
628e5f6d 3950 idr_destroy(&udp_ps);
c8f6a362 3951 idr_destroy(&ipoib_ps);
2d2e9415 3952 idr_destroy(&ib_ps);
3953}
3954
3955module_init(cma_init);
3956module_exit(cma_cleanup);