[IB] CM: Fix initialization of QP attributes for UC QPs.
drivers/infiniband/core/cm.c
/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	struct workqueue_struct *wq;
} cm;

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
};

struct cm_device {
	struct list_head list;
	struct ib_device *device;
	__be64 ca_guid;
	struct cm_port port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 packet_life_time;
};

struct cm_work {
	struct work_struct work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;
	wait_queue_head_t wait;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 local_ack_timeout;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(void *data);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		wake_up(&cm_id_priv->wait);
}

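/*
 * Illustrative sketch (not part of this file): the reference pattern used
 * throughout this module.  A lookup takes a reference under cm.lock, the
 * caller drops it with cm_deref_id(), and ib_destroy_cm_id() waits for the
 * count to reach zero before freeing:
 *
 *	cm_id_priv = cm_acquire_id(local_id, remote_id);  // takes a reference
 *	if (cm_id_priv) {
 *		... use cm_id_priv ...
 *		cm_deref_id(cm_id_priv);  // last put wakes a waiting destroyer
 *	}
 */
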
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       ah, 0, sizeof(struct ib_mad_hdr),
			       sizeof(struct ib_mad) - sizeof(struct ib_mad_hdr),
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->send_wr.wr.ud.retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       ah, 0, sizeof(struct ib_mad_hdr),
			       sizeof(struct ib_mad) - sizeof(struct ib_mad_hdr),
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->send_wr.wr.ud.ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static void *cm_copy_private_data(const void *private_data,
				  u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmalloc(private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	memcpy(data, private_data, private_data_len);
	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
			   u16 dlid, u8 sl, u16 src_path_bits)
{
	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = dlid;
	ah_attr->sl = sl;
	ah_attr->src_path_bits = src_path_bits;
	ah_attr->port_num = port_num;
}

static void cm_init_av_for_response(struct cm_port *port,
				    struct ib_wc *wc, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid,
		       wc->sl, wc->dlid_path_bits);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
					&p, NULL)) {
			port = &cm_dev->port[p - 1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	cm_set_ah_attr(&av->ah_attr, av->port->port_num,
		       be16_to_cpu(path->dlid), path->sl,
		       be16_to_cpu(path->slid) & 0x7F);
	av->packet_life_time = path->packet_life_time;
	return 0;
}

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
					(__force int *) &cm_id_priv->id.local_id);
		spin_unlock_irqrestore(&cm.lock, flags);
	} while ((ret == -EAGAIN) &&
		 idr_pre_get(&cm.local_id_table, GFP_KERNEL));
	return ret;
}

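/*
 * Note on the loop above: idr_get_new_above() returns -EAGAIN when the idr
 * needs more memory, which cannot be allocated under the spinlock.
 * idr_pre_get() preallocates with GFP_KERNEL outside the lock, and the
 * id allocation is then retried; the loop exits once an id is assigned or
 * preallocation itself fails.
 */
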
static void cm_free_id(__be32 local_id)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	idr_remove(&cm.local_id_table, (__force int) local_id);
	spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_id_private *cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irqrestore(&cm.lock, flags);

	return cm_id_priv;
}

static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}

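/*
 * Worked example of the wildcard collision check above (illustrative
 * values): a new listener with service_id 0x1000000000000000 and
 * service_mask 0xFF00000000000000 collides with an existing listener on
 * the same device whenever (existing->service_mask & 0x1000000000000000)
 * equals (0xFF00000000000000 & existing->service_id), i.e. the two masked
 * ranges overlap.  A mask of ~0ULL requests an exact-match listen.
 */
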
static struct cm_id_private *cm_find_listen(struct ib_device *device,
					    __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device))
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info *cm_insert_remote_id(struct cm_timewait_info
						    *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info *cm_find_remote_id(__be64 remote_ca_guid,
						  __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info *cm_insert_remote_qpn(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private *cm_insert_remote_sidr(struct cm_id_private
						   *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
		else {
			int cmp;

			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kmalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	memset(cm_id_priv, 0, sizeof *cm_id_priv);
	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_waitqueue_head(&cm_id_priv->wait);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

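/*
 * Worked example for cm_convert_to_ms(): an IBA time of 20 means
 * 4.096 us * 2^20 ~= 4.3 s; the function returns 1 << (20 - 8) = 4096 ms.
 * Treating 4.096 us as roughly 4 us gives 2^iba_time us / 1000 ~=
 * 2^(iba_time - 8) ms, which is the shift used above.
 */
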
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	unsigned long flags;

	if (!timewait_info->inserted_remote_id &&
	    !timewait_info->inserted_remote_qp)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kmalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);
	memset(timewait_info, 0, sizeof *timewait_info);

	timewait_info->work.local_id = local_id;
	INIT_WORK(&timewait_info->work.work, cm_work_handler,
		  &timewait_info->work);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		spin_lock_irqsave(&cm.lock, flags);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		/* Fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->av.port->cm_dev->ca_guid,
			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
			       NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	}

	cm_free_id(cm_id->local_id);
	atomic_dec(&cm_id_priv->refcount);
	wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

int ib_cm_listen(struct ib_cm_id *cm_id,
		 __be64 service_id,
		 __be64 service_mask)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask :
		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	BUG_ON(cm_id->state != IB_CM_IDLE);

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

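/*
 * Illustrative usage sketch (hypothetical handler, context and service id;
 * not part of this file): a passive side typically creates an id and
 * listens on a well-known service id:
 *
 *	struct ib_cm_id *cm_id;
 *	int ret;
 *
 *	cm_id = ib_create_cm_id(device, my_cm_handler, my_context);
 *	if (IS_ERR(cm_id))
 *		return PTR_ERR(cm_id);
 *	ret = ib_cm_listen(cm_id, cpu_to_be64(MY_SERVICE_ID), 0);
 *	// a service_mask of 0 is treated above as an exact match (~0ULL)
 */
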
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32) cm_id_priv->id.local_id |
			 (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}

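/*
 * Resulting TID layout (derived from the code above): bits 63..32 carry
 * the MAD agent's hi_tid, bits 31..30 the message sequence number, and
 * bits 29..0 the local communication id, so responses can be routed back
 * to the owning cm_id and concurrent exchanges get distinct TIDs.
 */
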
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method = IB_MGMT_METHOD_SEND;
	hdr->attr_id = attr_id;
	hdr->tid = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		min(31, param->primary_path->packet_life_time + 1));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			min(31, param->alternate_path->packet_life_time + 1));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static inline int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_send_wr *bad_send_wr;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info))
		goto out;

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &cm_id_priv->msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

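/*
 * Illustrative active-side sketch (hypothetical path record, service id
 * and values; not part of this file).  Only IB_QPT_RC and IB_QPT_UC are
 * accepted by cm_validate_req_param():
 *
 *	struct ib_cm_req_param param;
 *	int ret;
 *
 *	memset(&param, 0, sizeof param);
 *	param.primary_path = &path_rec;	// e.g. from an SA path query
 *	param.service_id = cpu_to_be64(MY_SERVICE_ID);
 *	param.qp_num = my_qp->qp_num;
 *	param.qp_type = IB_QPT_RC;	// or IB_QPT_UC
 *	param.starting_psn = my_psn;
 *	param.responder_resources = 4;
 *	param.initiator_depth = 4;
 *	param.remote_cm_response_timeout = 20;
 *	param.local_cm_response_timeout = 20;
 *	param.retry_count = 7;
 *	param.rnr_retry_count = 7;
 *	param.max_cm_retries = 15;
 *	ret = ib_send_cm_req(cm_id, &param);
 */
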
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct ib_send_wr *bad_send_wr;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(port->mad_agent, &msg->send_wr, &bad_send_wr);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

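/*
 * Example (illustrative GUID): if both ends of a connection attempt
 * report CA GUID 0x0002c90200001234, the side with the numerically larger
 * QPN is the active peer; otherwise the larger GUID wins.  This gives a
 * stable, symmetric tie-break for peer-to-peer connection establishment.
 */
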
static inline void cm_format_paths_from_req(struct cm_req_msg *req_msg,
					    struct ib_sa_path_rec *primary_path,
					    struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *) work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	unsigned long flags;
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
			       &bad_send_wr);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
}

static struct cm_id_private *cm_match_req(struct cm_work *work,
					  struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	unsigned long flags;

	req_msg = (struct cm_req_msg *) work->mad_recv_wc->recv_buf.mad;

	/* Check for duplicate REQ and stale connections. */
	spin_lock_irqsave(&cm.lock, flags);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (!timewait_info)
		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irqrestore(&cm.lock, flags);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		} else
			cm_issue_rej(work->port, work->mad_recv_wc,
				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
				     NULL, 0);
		goto error;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id);
	if (!listen_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto error;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irqrestore(&cm.lock, flags);
	return listen_cm_id_priv;

error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
	return NULL;
}

static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *) work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto error1;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		goto error2;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret)
		goto error3;
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret)
			goto error3;
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

error3:	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
error2:	kfree(cm_id_priv->timewait_info);
	cm_id_priv->timewait_info = NULL;
error1:	ib_destroy_cm_id(&cm_id_priv->id);
	return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

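/*
 * Illustrative passive-side sketch (hypothetical QP and values; not part
 * of this file): from the REQ-received callback, accept with a REP; the
 * connection becomes established once the RTU arrives, while the active
 * side completes from its REP callback with ib_send_cm_rtu(cm_id, NULL, 0):
 *
 *	struct ib_cm_rep_param rep;
 *	int ret;
 *
 *	memset(&rep, 0, sizeof rep);
 *	rep.qp_num = my_qp->qp_num;
 *	rep.starting_psn = my_psn;
 *	rep.responder_resources = 4;
 *	rep.initiator_depth = 4;
 *	rep.rnr_retry_count = 7;
 *	ret = ib_send_cm_rep(cm_id, &rep);
 */
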
1481static void cm_format_rep_event(struct cm_work *work)
1482{
1483 struct cm_rep_msg *rep_msg;
1484 struct ib_cm_rep_event_param *param;
1485
1486 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1487 param = &work->cm_event.param.rep_rcvd;
1488 param->remote_ca_guid = rep_msg->local_ca_guid;
1489 param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
1490 param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
1491 param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
1492 param->responder_resources = rep_msg->initiator_depth;
1493 param->initiator_depth = rep_msg->resp_resources;
1494 param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
1495 param->failover_accepted = cm_rep_get_failover(rep_msg);
1496 param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
1497 param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1498 param->srq = cm_rep_get_srq(rep_msg);
1499 work->cm_event.private_data = &rep_msg->private_data;
1500}
1501
1502static void cm_dup_rep_handler(struct cm_work *work)
1503{
1504 struct cm_id_private *cm_id_priv;
1505 struct cm_rep_msg *rep_msg;
1506 struct ib_mad_send_buf *msg = NULL;
1507 struct ib_send_wr *bad_send_wr;
1508 unsigned long flags;
1509 int ret;
1510
1511 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
1512 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
1513 rep_msg->local_comm_id);
1514 if (!cm_id_priv)
1515 return;
1516
1517 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1518 if (ret)
1519 goto deref;
1520
1521 spin_lock_irqsave(&cm_id_priv->lock, flags);
1522 if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
1523 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1524 cm_id_priv->private_data,
1525 cm_id_priv->private_data_len);
1526 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
1527 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1528 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
1529 cm_id_priv->private_data,
1530 cm_id_priv->private_data_len);
1531 else
1532 goto unlock;
1533 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1534
1535 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
1536 &bad_send_wr);
1537 if (ret)
1538 goto free;
1539 goto deref;
1540
1541unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1542free: cm_free_msg(msg);
1543deref: cm_deref_id(cm_id_priv);
1544}
1545
1546static int cm_rep_handler(struct cm_work *work)
1547{
1548 struct cm_id_private *cm_id_priv;
1549 struct cm_rep_msg *rep_msg;
1550 unsigned long flags;
1551 int ret;
1552
1553 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1554 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
1555 if (!cm_id_priv) {
1556 cm_dup_rep_handler(work);
1557 return -EINVAL;
1558 }
1559
1560 cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
1561 cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
1562 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1563
1564 spin_lock_irqsave(&cm.lock, flags);
1565 /* Check for duplicate REP. */
1566 if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
1567 spin_unlock_irqrestore(&cm.lock, flags);
1568 ret = -EINVAL;
1569 goto error;
1570 }
1571 /* Check for a stale connection. */
1572 if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
1573 spin_unlock_irqrestore(&cm.lock, flags);
1574 cm_issue_rej(work->port, work->mad_recv_wc,
1575 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
1576 NULL, 0);
1577 ret = -EINVAL;
1578 goto error;
1579 }
1580 spin_unlock_irqrestore(&cm.lock, flags);
1581
1582 cm_format_rep_event(work);
1583
1584 spin_lock_irqsave(&cm_id_priv->lock, flags);
1585 switch (cm_id_priv->id.state) {
1586 case IB_CM_REQ_SENT:
1587 case IB_CM_MRA_REQ_RCVD:
1588 break;
1589 default:
1590 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1591 ret = -EINVAL;
1592 goto error;
1593 }
1594 cm_id_priv->id.state = IB_CM_REP_RCVD;
1595 cm_id_priv->id.remote_id = rep_msg->local_comm_id;
1596 cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1597 cm_id_priv->initiator_depth = rep_msg->resp_resources;
1598 cm_id_priv->responder_resources = rep_msg->initiator_depth;
1599 cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
1600 cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1601
1602 /* todo: handle peer_to_peer */
1603
1604 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1605 (unsigned long) cm_id_priv->msg);
1606 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1607 if (!ret)
1608 list_add_tail(&work->list, &cm_id_priv->work_list);
1609 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1610
1611 if (ret)
1612 cm_process_work(cm_id_priv, work);
1613 else
1614 cm_deref_id(cm_id_priv);
1615 return 0;
1616
1617error: cm_cleanup_timewait(cm_id_priv->timewait_info);
1618 cm_deref_id(cm_id_priv);
1619 return ret;
1620}
1621
1622static int cm_establish_handler(struct cm_work *work)
1623{
1624 struct cm_id_private *cm_id_priv;
1625 unsigned long flags;
1626 int ret;
1627
1628 /* See comment in ib_cm_establish about lookup. */
1629 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
1630 if (!cm_id_priv)
1631 return -EINVAL;
1632
1633 spin_lock_irqsave(&cm_id_priv->lock, flags);
1634 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
1635 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1636 goto out;
1637 }
1638
1639 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1640 (unsigned long) cm_id_priv->msg);
1641 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1642 if (!ret)
1643 list_add_tail(&work->list, &cm_id_priv->work_list);
1644 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1645
1646 if (ret)
1647 cm_process_work(cm_id_priv, work);
1648 else
1649 cm_deref_id(cm_id_priv);
1650 return 0;
1651out:
1652 cm_deref_id(cm_id_priv);
1653 return -EINVAL;
1654}
1655
1656static int cm_rtu_handler(struct cm_work *work)
1657{
1658 struct cm_id_private *cm_id_priv;
1659 struct cm_rtu_msg *rtu_msg;
1660 unsigned long flags;
1661 int ret;
1662
1663 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
1664 cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
1665 rtu_msg->local_comm_id);
1666 if (!cm_id_priv)
1667 return -EINVAL;
1668
1669 work->cm_event.private_data = &rtu_msg->private_data;
1670
1671 spin_lock_irqsave(&cm_id_priv->lock, flags);
1672 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
1673 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
1674 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1675 goto out;
1676 }
1677 cm_id_priv->id.state = IB_CM_ESTABLISHED;
1678
1679 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1680 (unsigned long) cm_id_priv->msg);
1681 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1682 if (!ret)
1683 list_add_tail(&work->list, &cm_id_priv->work_list);
1684 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1685
1686 if (ret)
1687 cm_process_work(cm_id_priv, work);
1688 else
1689 cm_deref_id(cm_id_priv);
1690 return 0;
1691out:
1692 cm_deref_id(cm_id_priv);
1693 return -EINVAL;
1694}
1695
1696static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
1697 struct cm_id_private *cm_id_priv,
1698 const void *private_data,
1699 u8 private_data_len)
1700{
1701 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
1702 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
1703 dreq_msg->local_comm_id = cm_id_priv->id.local_id;
1704 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
1705 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
1706
1707 if (private_data && private_data_len)
1708 memcpy(dreq_msg->private_data, private_data, private_data_len);
1709}
1710
1711int ib_send_cm_dreq(struct ib_cm_id *cm_id,
1712 const void *private_data,
1713 u8 private_data_len)
1714{
1715 struct cm_id_private *cm_id_priv;
1716 struct ib_mad_send_buf *msg;
1717 struct ib_send_wr *bad_send_wr;
1718 unsigned long flags;
1719 int ret;
1720
1721 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
1722 return -EINVAL;
1723
1724 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1725 spin_lock_irqsave(&cm_id_priv->lock, flags);
1726 if (cm_id->state != IB_CM_ESTABLISHED) {
1727 ret = -EINVAL;
1728 goto out;
1729 }
1730
1731 ret = cm_alloc_msg(cm_id_priv, &msg);
1732 if (ret) {
1733 cm_enter_timewait(cm_id_priv);
1734 goto out;
1735 }
1736
1737 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
1738 private_data, private_data_len);
1739 msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
1740 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
1741
1742 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
1743 &msg->send_wr, &bad_send_wr);
1744 if (ret) {
1745 cm_enter_timewait(cm_id_priv);
1746 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1747 cm_free_msg(msg);
1748 return ret;
1749 }
1750
1751 cm_id->state = IB_CM_DREQ_SENT;
1752 cm_id_priv->msg = msg;
1753out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1754 return ret;
1755}
1756EXPORT_SYMBOL(ib_send_cm_dreq);
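/*
 * Hypothetical usage sketch (not part of the original file): a ULP
 * holding an established cm_id initiates disconnect by sending a
 * DREQ.  The function name and error handling below are invented
 * for illustration.
 */
#if 0
static void example_disconnect(struct ib_cm_id *cm_id)
{
	int ret;

	ret = ib_send_cm_dreq(cm_id, NULL, 0);
	if (ret)
		printk(KERN_WARNING "ib_send_cm_dreq failed: %d\n", ret);
}
#endif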
1757
1758static void cm_format_drep(struct cm_drep_msg *drep_msg,
1759 struct cm_id_private *cm_id_priv,
1760 const void *private_data,
1761 u8 private_data_len)
1762{
1763 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
1764 drep_msg->local_comm_id = cm_id_priv->id.local_id;
1765 drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1766
1767 if (private_data && private_data_len)
1768 memcpy(drep_msg->private_data, private_data, private_data_len);
1769}
1770
1771int ib_send_cm_drep(struct ib_cm_id *cm_id,
1772 const void *private_data,
1773 u8 private_data_len)
1774{
1775 struct cm_id_private *cm_id_priv;
1776 struct ib_mad_send_buf *msg;
1777 struct ib_send_wr *bad_send_wr;
1778 unsigned long flags;
1779 void *data;
1780 int ret;
1781
1782 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
1783 return -EINVAL;
1784
1785 data = cm_copy_private_data(private_data, private_data_len);
1786 if (IS_ERR(data))
1787 return PTR_ERR(data);
1788
1789 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1790 spin_lock_irqsave(&cm_id_priv->lock, flags);
1791 if (cm_id->state != IB_CM_DREQ_RCVD) {
1792 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1793 kfree(data);
1794 return -EINVAL;
1795 }
1796
1797 cm_set_private_data(cm_id_priv, data, private_data_len);
1798 cm_enter_timewait(cm_id_priv);
1799
1800 ret = cm_alloc_msg(cm_id_priv, &msg);
1801 if (ret)
1802 goto out;
1803
1804 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1805 private_data, private_data_len);
1806
1807 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
1808 &bad_send_wr);
1809 if (ret) {
1810 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1811 cm_free_msg(msg);
1812 return ret;
1813 }
1814
1815out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1816 return ret;
1817}
1818EXPORT_SYMBOL(ib_send_cm_drep);
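/*
 * Note that ib_send_cm_drep() enters timewait before it tries to
 * allocate or post the DREP, so the disconnect completes locally
 * even if sending the reply fails.
 */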
1819
1820static int cm_dreq_handler(struct cm_work *work)
1821{
1822 struct cm_id_private *cm_id_priv;
1823 struct cm_dreq_msg *dreq_msg;
1824 struct ib_mad_send_buf *msg = NULL;
1825 struct ib_send_wr *bad_send_wr;
1826 unsigned long flags;
1827 int ret;
1828
1829 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
1830 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
1831 dreq_msg->local_comm_id);
1832 if (!cm_id_priv)
1833 return -EINVAL;
1834
1835 work->cm_event.private_data = &dreq_msg->private_data;
1836
1837 spin_lock_irqsave(&cm_id_priv->lock, flags);
1838 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
1839 goto unlock;
1840
1841 switch (cm_id_priv->id.state) {
1842 case IB_CM_REP_SENT:
1843 case IB_CM_DREQ_SENT:
1844 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1845 (unsigned long) cm_id_priv->msg);
1846 break;
1847 case IB_CM_ESTABLISHED:
1848 case IB_CM_MRA_REP_RCVD:
1849 break;
1850 case IB_CM_TIMEWAIT:
1851 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
1852 goto unlock;
1853
1854 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1855 cm_id_priv->private_data,
1856 cm_id_priv->private_data_len);
1857 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1858
1859 if (ib_post_send_mad(cm_id_priv->av.port->mad_agent,
1860 &msg->send_wr, &bad_send_wr))
1861 cm_free_msg(msg);
1862 goto deref;
1863 default:
1864 goto unlock;
1865 }
1866 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
1867 cm_id_priv->tid = dreq_msg->hdr.tid;
1868 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1869 if (!ret)
1870 list_add_tail(&work->list, &cm_id_priv->work_list);
1871 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1872
1873 if (ret)
1874 cm_process_work(cm_id_priv, work);
1875 else
1876 cm_deref_id(cm_id_priv);
1877 return 0;
1878
1879unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1880deref: cm_deref_id(cm_id_priv);
1881 return -EINVAL;
1882}
1883
1884static int cm_drep_handler(struct cm_work *work)
1885{
1886 struct cm_id_private *cm_id_priv;
1887 struct cm_drep_msg *drep_msg;
1888 unsigned long flags;
1889 int ret;
1890
1891 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
1892 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
1893 drep_msg->local_comm_id);
1894 if (!cm_id_priv)
1895 return -EINVAL;
1896
1897 work->cm_event.private_data = &drep_msg->private_data;
1898
1899 spin_lock_irqsave(&cm_id_priv->lock, flags);
1900 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
1901 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
1902 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1903 goto out;
1904 }
1905 cm_enter_timewait(cm_id_priv);
1906
1907 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1908 (unsigned long) cm_id_priv->msg);
1909 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1910 if (!ret)
1911 list_add_tail(&work->list, &cm_id_priv->work_list);
1912 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1913
1914 if (ret)
1915 cm_process_work(cm_id_priv, work);
1916 else
1917 cm_deref_id(cm_id_priv);
1918 return 0;
1919out:
1920 cm_deref_id(cm_id_priv);
1921 return -EINVAL;
1922}
1923
1924int ib_send_cm_rej(struct ib_cm_id *cm_id,
1925 enum ib_cm_rej_reason reason,
1926 void *ari,
1927 u8 ari_length,
1928 const void *private_data,
1929 u8 private_data_len)
1930{
1931 struct cm_id_private *cm_id_priv;
1932 struct ib_mad_send_buf *msg;
1933 struct ib_send_wr *bad_send_wr;
1934 unsigned long flags;
1935 int ret;
1936
1937 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
1938 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
1939 return -EINVAL;
1940
1941 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1942
1943 spin_lock_irqsave(&cm_id_priv->lock, flags);
1944 switch (cm_id->state) {
1945 case IB_CM_REQ_SENT:
1946 case IB_CM_MRA_REQ_RCVD:
1947 case IB_CM_REQ_RCVD:
1948 case IB_CM_MRA_REQ_SENT:
1949 case IB_CM_REP_RCVD:
1950 case IB_CM_MRA_REP_SENT:
1951 ret = cm_alloc_msg(cm_id_priv, &msg);
1952 if (!ret)
1953 cm_format_rej((struct cm_rej_msg *) msg->mad,
1954 cm_id_priv, reason, ari, ari_length,
1955 private_data, private_data_len);
1956
1957 cm_reset_to_idle(cm_id_priv);
1958 break;
1959 case IB_CM_REP_SENT:
1960 case IB_CM_MRA_REP_RCVD:
1961 ret = cm_alloc_msg(cm_id_priv, &msg);
1962 if (!ret)
1963 cm_format_rej((struct cm_rej_msg *) msg->mad,
1964 cm_id_priv, reason, ari, ari_length,
1965 private_data, private_data_len);
1966
1967 cm_enter_timewait(cm_id_priv);
1968 break;
1969 default:
1970 ret = -EINVAL;
1971 goto out;
1972 }
1973
1974 if (ret)
1975 goto out;
1976
1977 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
1978 &msg->send_wr, &bad_send_wr);
1979 if (ret)
1980 cm_free_msg(msg);
1981
1982out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1983 return ret;
1984}
1985EXPORT_SYMBOL(ib_send_cm_rej);
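/*
 * The state handling above follows the CM state machine: rejecting
 * while the REQ is still being negotiated returns the cm_id to idle,
 * but once a REP has been sent the peer may already consider the
 * connection established, so the cm_id enters timewait instead to
 * guard against stale messages.
 */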
1986
1987static void cm_format_rej_event(struct cm_work *work)
1988{
1989 struct cm_rej_msg *rej_msg;
1990 struct ib_cm_rej_event_param *param;
1991
1992 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
1993 param = &work->cm_event.param.rej_rcvd;
1994 param->ari = rej_msg->ari;
1995 param->ari_length = cm_rej_get_reject_info_len(rej_msg);
97f52eb4 1996 param->reason = __be16_to_cpu(rej_msg->reason);
1997 work->cm_event.private_data = &rej_msg->private_data;
1998}
1999
2000static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2001{
2002 struct cm_timewait_info *timewait_info;
2003 struct cm_id_private *cm_id_priv;
2004 unsigned long flags;
97f52eb4 2005 __be32 remote_id;
2006
2007 remote_id = rej_msg->local_comm_id;
2008
97f52eb4 2009 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
a977049d 2010 spin_lock_irqsave(&cm.lock, flags);
97f52eb4 2011 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
2012 remote_id);
2013 if (!timewait_info) {
2014 spin_unlock_irqrestore(&cm.lock, flags);
2015 return NULL;
2016 }
2017 cm_id_priv = idr_find(&cm.local_id_table,
97f52eb4 2018 (__force int) timewait_info->work.local_id);
2019 if (cm_id_priv) {
2020 if (cm_id_priv->id.remote_id == remote_id)
2021 atomic_inc(&cm_id_priv->refcount);
2022 else
2023 cm_id_priv = NULL;
2024 }
2025 spin_unlock_irqrestore(&cm.lock, flags);
2026 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2027 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2028 else
2029 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2030
2031 return cm_id_priv;
2032}
2033
2034static int cm_rej_handler(struct cm_work *work)
2035{
2036 struct cm_id_private *cm_id_priv;
2037 struct cm_rej_msg *rej_msg;
2038 unsigned long flags;
2039 int ret;
2040
2041 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2042 cm_id_priv = cm_acquire_rejected_id(rej_msg);
2043 if (!cm_id_priv)
2044 return -EINVAL;
2045
2046 cm_format_rej_event(work);
2047
2048 spin_lock_irqsave(&cm_id_priv->lock, flags);
2049 switch (cm_id_priv->id.state) {
2050 case IB_CM_REQ_SENT:
2051 case IB_CM_MRA_REQ_RCVD:
2052 case IB_CM_REP_SENT:
2053 case IB_CM_MRA_REP_RCVD:
2054 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2055 (unsigned long) cm_id_priv->msg);
2056 /* fall through */
2057 case IB_CM_REQ_RCVD:
2058 case IB_CM_MRA_REQ_SENT:
97f52eb4 2059 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2060 cm_enter_timewait(cm_id_priv);
2061 else
2062 cm_reset_to_idle(cm_id_priv);
2063 break;
2064 case IB_CM_DREQ_SENT:
2065 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2066 (unsigned long) cm_id_priv->msg);
2067 /* fall through */
2068 case IB_CM_REP_RCVD:
2069 case IB_CM_MRA_REP_SENT:
2070 case IB_CM_ESTABLISHED:
2071 cm_enter_timewait(cm_id_priv);
2072 break;
2073 default:
2074 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2075 ret = -EINVAL;
2076 goto out;
2077 }
2078
2079 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2080 if (!ret)
2081 list_add_tail(&work->list, &cm_id_priv->work_list);
2082 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2083
2084 if (ret)
2085 cm_process_work(cm_id_priv, work);
2086 else
2087 cm_deref_id(cm_id_priv);
2088 return 0;
2089out:
2090 cm_deref_id(cm_id_priv);
2091 return -EINVAL;
2092}
2093
2094int ib_send_cm_mra(struct ib_cm_id *cm_id,
2095 u8 service_timeout,
2096 const void *private_data,
2097 u8 private_data_len)
2098{
2099 struct cm_id_private *cm_id_priv;
2100 struct ib_mad_send_buf *msg;
2101 struct ib_send_wr *bad_send_wr;
2102 void *data;
2103 unsigned long flags;
2104 int ret;
2105
2106 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2107 return -EINVAL;
2108
2109 data = cm_copy_private_data(private_data, private_data_len);
2110 if (IS_ERR(data))
2111 return PTR_ERR(data);
2112
2113 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2114
2115 spin_lock_irqsave(&cm_id_priv->lock, flags);
2116 switch(cm_id_priv->id.state) {
2117 case IB_CM_REQ_RCVD:
2118 ret = cm_alloc_msg(cm_id_priv, &msg);
2119 if (ret)
2120 goto error1;
2121
2122 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2123 CM_MSG_RESPONSE_REQ, service_timeout,
2124 private_data, private_data_len);
2125 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2126 &msg->send_wr, &bad_send_wr);
2127 if (ret)
2128 goto error2;
2129 cm_id->state = IB_CM_MRA_REQ_SENT;
2130 break;
2131 case IB_CM_REP_RCVD:
2132 ret = cm_alloc_msg(cm_id_priv, &msg);
2133 if (ret)
2134 goto error1;
2135
2136 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2137 CM_MSG_RESPONSE_REP, service_timeout,
2138 private_data, private_data_len);
2139 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2140 &msg->send_wr, &bad_send_wr);
2141 if (ret)
2142 goto error2;
2143 cm_id->state = IB_CM_MRA_REP_SENT;
2144 break;
2145 case IB_CM_ESTABLISHED:
2146 ret = cm_alloc_msg(cm_id_priv, &msg);
2147 if (ret)
2148 goto error1;
2149
2150 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2151 CM_MSG_RESPONSE_OTHER, service_timeout,
2152 private_data, private_data_len);
2153 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2154 &msg->send_wr, &bad_send_wr);
2155 if (ret)
2156 goto error2;
2157 cm_id->lap_state = IB_CM_MRA_LAP_SENT;
2158 break;
2159 default:
2160 ret = -EINVAL;
2161 goto error1;
2162 }
2163 cm_id_priv->service_timeout = service_timeout;
2164 cm_set_private_data(cm_id_priv, data, private_data_len);
2165 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2166 return 0;
2167
2168error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2169 kfree(data);
2170 return ret;
2171
2172error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2173 kfree(data);
2174 cm_free_msg(msg);
2175 return ret;
2176}
2177EXPORT_SYMBOL(ib_send_cm_mra);
2178
2179static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2180{
2181 switch (cm_mra_get_msg_mraed(mra_msg)) {
2182 case CM_MSG_RESPONSE_REQ:
2183 return cm_acquire_id(mra_msg->remote_comm_id, 0);
2184 case CM_MSG_RESPONSE_REP:
2185 case CM_MSG_RESPONSE_OTHER:
2186 return cm_acquire_id(mra_msg->remote_comm_id,
2187 mra_msg->local_comm_id);
2188 default:
2189 return NULL;
2190 }
2191}
2192
2193static int cm_mra_handler(struct cm_work *work)
2194{
2195 struct cm_id_private *cm_id_priv;
2196 struct cm_mra_msg *mra_msg;
2197 unsigned long flags;
2198 int timeout, ret;
2199
2200 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2201 cm_id_priv = cm_acquire_mraed_id(mra_msg);
2202 if (!cm_id_priv)
2203 return -EINVAL;
2204
2205 work->cm_event.private_data = &mra_msg->private_data;
2206 work->cm_event.param.mra_rcvd.service_timeout =
2207 cm_mra_get_service_timeout(mra_msg);
2208 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2209 cm_convert_to_ms(cm_id_priv->av.packet_life_time);
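	/*
	 * Editorial note: the peer's MRA promises a response within
	 * service_timeout, so the outstanding MAD's timeout is extended
	 * by that amount plus one packet lifetime for wire transit.
	 */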
2210
2211 spin_lock_irqsave(&cm_id_priv->lock, flags);
2212 switch (cm_id_priv->id.state) {
2213 case IB_CM_REQ_SENT:
2214 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2215 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2216 (unsigned long) cm_id_priv->msg, timeout))
2217 goto out;
2218 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2219 break;
2220 case IB_CM_REP_SENT:
2221 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2222 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2223 (unsigned long) cm_id_priv->msg, timeout))
2224 goto out;
2225 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2226 break;
2227 case IB_CM_ESTABLISHED:
2228 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2229 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2230 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2231 (unsigned long) cm_id_priv->msg, timeout))
2232 goto out;
2233 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2234 break;
2235 default:
2236 goto out;
2237 }
2238
2239 cm_id_priv->msg->context[1] = (void *) (unsigned long)
2240 cm_id_priv->id.state;
2241 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2242 if (!ret)
2243 list_add_tail(&work->list, &cm_id_priv->work_list);
2244 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2245
2246 if (ret)
2247 cm_process_work(cm_id_priv, work);
2248 else
2249 cm_deref_id(cm_id_priv);
2250 return 0;
2251out:
2252 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2253 cm_deref_id(cm_id_priv);
2254 return -EINVAL;
2255}
2256
2257static void cm_format_lap(struct cm_lap_msg *lap_msg,
2258 struct cm_id_private *cm_id_priv,
2259 struct ib_sa_path_rec *alternate_path,
2260 const void *private_data,
2261 u8 private_data_len)
2262{
2263 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2264 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2265 lap_msg->local_comm_id = cm_id_priv->id.local_id;
2266 lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2267 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2268 /* todo: need remote CM response timeout */
2269 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2270 lap_msg->alt_local_lid = alternate_path->slid;
2271 lap_msg->alt_remote_lid = alternate_path->dlid;
2272 lap_msg->alt_local_gid = alternate_path->sgid;
2273 lap_msg->alt_remote_gid = alternate_path->dgid;
2274 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2275 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2276 lap_msg->alt_hop_limit = alternate_path->hop_limit;
2277 cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2278 cm_lap_set_sl(lap_msg, alternate_path->sl);
2279 cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
2280 cm_lap_set_local_ack_timeout(lap_msg,
2281 min(31, alternate_path->packet_life_time + 1));
2282
2283 if (private_data && private_data_len)
2284 memcpy(lap_msg->private_data, private_data, private_data_len);
2285}
2286
2287int ib_send_cm_lap(struct ib_cm_id *cm_id,
2288 struct ib_sa_path_rec *alternate_path,
2289 const void *private_data,
2290 u8 private_data_len)
2291{
2292 struct cm_id_private *cm_id_priv;
2293 struct ib_mad_send_buf *msg;
2294 struct ib_send_wr *bad_send_wr;
2295 unsigned long flags;
2296 int ret;
2297
2298 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2299 return -EINVAL;
2300
2301 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2302 spin_lock_irqsave(&cm_id_priv->lock, flags);
2303 if (cm_id->state != IB_CM_ESTABLISHED ||
2304 cm_id->lap_state != IB_CM_LAP_IDLE) {
2305 ret = -EINVAL;
2306 goto out;
2307 }
2308
2309 ret = cm_alloc_msg(cm_id_priv, &msg);
2310 if (ret)
2311 goto out;
2312
2313 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
2314 alternate_path, private_data, private_data_len);
2315 msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
2316 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
2317
2318 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2319 &msg->send_wr, &bad_send_wr);
2320 if (ret) {
2321 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2322 cm_free_msg(msg);
2323 return ret;
2324 }
2325
2326 cm_id->lap_state = IB_CM_LAP_SENT;
2327 cm_id_priv->msg = msg;
2328
2329out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2330 return ret;
2331}
2332EXPORT_SYMBOL(ib_send_cm_lap);
2333
2334static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
2335 struct cm_lap_msg *lap_msg)
2336{
2337 memset(path, 0, sizeof *path);
2338 path->dgid = lap_msg->alt_local_gid;
2339 path->sgid = lap_msg->alt_remote_gid;
2340 path->dlid = lap_msg->alt_local_lid;
2341 path->slid = lap_msg->alt_remote_lid;
2342 path->flow_label = cm_lap_get_flow_label(lap_msg);
2343 path->hop_limit = lap_msg->alt_hop_limit;
2344 path->traffic_class = cm_lap_get_traffic_class(lap_msg);
2345 path->reversible = 1;
2346 /* pkey is same as in REQ */
2347 path->sl = cm_lap_get_sl(lap_msg);
2348 path->mtu_selector = IB_SA_EQ;
2349 /* mtu is same as in REQ */
2350 path->rate_selector = IB_SA_EQ;
2351 path->rate = cm_lap_get_packet_rate(lap_msg);
2352 path->packet_life_time_selector = IB_SA_EQ;
2353 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
2354 path->packet_life_time -= (path->packet_life_time > 0);
2355}
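/*
 * Note the swap in cm_format_path_from_lap() above: the LAP describes
 * the alternate path from the sender's point of view, so its "local"
 * fields become this node's destination (dgid/dlid) and its "remote"
 * fields become the source.
 */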
2356
2357static int cm_lap_handler(struct cm_work *work)
2358{
2359 struct cm_id_private *cm_id_priv;
2360 struct cm_lap_msg *lap_msg;
2361 struct ib_cm_lap_event_param *param;
2362 struct ib_mad_send_buf *msg = NULL;
2363 struct ib_send_wr *bad_send_wr;
2364 unsigned long flags;
2365 int ret;
2366
2367 /* todo: verify LAP request and send reject APR if invalid. */
2368 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
2369 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
2370 lap_msg->local_comm_id);
2371 if (!cm_id_priv)
2372 return -EINVAL;
2373
2374 param = &work->cm_event.param.lap_rcvd;
2375 param->alternate_path = &work->path[0];
2376 cm_format_path_from_lap(param->alternate_path, lap_msg);
2377 work->cm_event.private_data = &lap_msg->private_data;
2378
2379 spin_lock_irqsave(&cm_id_priv->lock, flags);
2380 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
2381 goto unlock;
2382
2383 switch (cm_id_priv->id.lap_state) {
2384 case IB_CM_LAP_IDLE:
2385 break;
2386 case IB_CM_MRA_LAP_SENT:
2387 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2388 goto unlock;
2389
2390 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2391 CM_MSG_RESPONSE_OTHER,
2392 cm_id_priv->service_timeout,
2393 cm_id_priv->private_data,
2394 cm_id_priv->private_data_len);
2395 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2396
2397 if (ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2398 &msg->send_wr, &bad_send_wr))
2399 cm_free_msg(msg);
2400 goto deref;
2401 default:
2402 goto unlock;
2403 }
2404
2405 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
2406 cm_id_priv->tid = lap_msg->hdr.tid;
2407 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2408 if (!ret)
2409 list_add_tail(&work->list, &cm_id_priv->work_list);
2410 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2411
2412 if (ret)
2413 cm_process_work(cm_id_priv, work);
2414 else
2415 cm_deref_id(cm_id_priv);
2416 return 0;
2417
2418unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2419deref: cm_deref_id(cm_id_priv);
2420 return -EINVAL;
2421}
2422
2423static void cm_format_apr(struct cm_apr_msg *apr_msg,
2424 struct cm_id_private *cm_id_priv,
2425 enum ib_cm_apr_status status,
2426 void *info,
2427 u8 info_length,
2428 const void *private_data,
2429 u8 private_data_len)
2430{
2431 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
2432 apr_msg->local_comm_id = cm_id_priv->id.local_id;
2433 apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
2434 apr_msg->ap_status = (u8) status;
2435
2436 if (info && info_length) {
2437 apr_msg->info_length = info_length;
2438 memcpy(apr_msg->info, info, info_length);
2439 }
2440
2441 if (private_data && private_data_len)
2442 memcpy(apr_msg->private_data, private_data, private_data_len);
2443}
2444
2445int ib_send_cm_apr(struct ib_cm_id *cm_id,
2446 enum ib_cm_apr_status status,
2447 void *info,
2448 u8 info_length,
2449 const void *private_data,
2450 u8 private_data_len)
2451{
2452 struct cm_id_private *cm_id_priv;
2453 struct ib_mad_send_buf *msg;
2454 struct ib_send_wr *bad_send_wr;
2455 unsigned long flags;
2456 int ret;
2457
2458 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
2459 (info && info_length > IB_CM_APR_INFO_LENGTH))
2460 return -EINVAL;
2461
2462 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2463 spin_lock_irqsave(&cm_id_priv->lock, flags);
2464 if (cm_id->state != IB_CM_ESTABLISHED ||
2465 (cm_id->lap_state != IB_CM_LAP_RCVD &&
2466 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
2467 ret = -EINVAL;
2468 goto out;
2469 }
2470
2471 ret = cm_alloc_msg(cm_id_priv, &msg);
2472 if (ret)
2473 goto out;
2474
2475 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
2476 info, info_length, private_data, private_data_len);
2477 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2478 &msg->send_wr, &bad_send_wr);
2479 if (ret) {
2480 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2481 cm_free_msg(msg);
2482 return ret;
2483 }
2484
2485 cm_id->lap_state = IB_CM_LAP_IDLE;
2486out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2487 return ret;
2488}
2489EXPORT_SYMBOL(ib_send_cm_apr);
2490
2491static int cm_apr_handler(struct cm_work *work)
2492{
2493 struct cm_id_private *cm_id_priv;
2494 struct cm_apr_msg *apr_msg;
2495 unsigned long flags;
2496 int ret;
2497
2498 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
2499 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
2500 apr_msg->local_comm_id);
2501 if (!cm_id_priv)
2502 return -EINVAL; /* Unmatched reply. */
2503
2504 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
2505 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
2506 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
2507 work->cm_event.private_data = &apr_msg->private_data;
2508
2509 spin_lock_irqsave(&cm_id_priv->lock, flags);
2510 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
2511 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
2512 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
2513 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2514 goto out;
2515 }
2516 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
2517 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2518 (unsigned long) cm_id_priv->msg);
2519 cm_id_priv->msg = NULL;
2520
2521 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2522 if (!ret)
2523 list_add_tail(&work->list, &cm_id_priv->work_list);
2524 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2525
2526 if (ret)
2527 cm_process_work(cm_id_priv, work);
2528 else
2529 cm_deref_id(cm_id_priv);
2530 return 0;
2531out:
2532 cm_deref_id(cm_id_priv);
2533 return -EINVAL;
2534}
2535
2536static int cm_timewait_handler(struct cm_work *work)
2537{
2538 struct cm_timewait_info *timewait_info;
2539 struct cm_id_private *cm_id_priv;
2540 unsigned long flags;
2541 int ret;
2542
2543 timewait_info = (struct cm_timewait_info *)work;
2544 cm_cleanup_timewait(timewait_info);
2545
2546 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2547 timewait_info->work.remote_id);
2548 if (!cm_id_priv)
2549 return -EINVAL;
2550
2551 spin_lock_irqsave(&cm_id_priv->lock, flags);
2552 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
2553 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
2554 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2555 goto out;
2556 }
2557 cm_id_priv->id.state = IB_CM_IDLE;
2558 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2559 if (!ret)
2560 list_add_tail(&work->list, &cm_id_priv->work_list);
2561 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2562
2563 if (ret)
2564 cm_process_work(cm_id_priv, work);
2565 else
2566 cm_deref_id(cm_id_priv);
2567 return 0;
2568out:
2569 cm_deref_id(cm_id_priv);
2570 return -EINVAL;
2571}
2572
2573static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
2574 struct cm_id_private *cm_id_priv,
2575 struct ib_cm_sidr_req_param *param)
2576{
2577 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
2578 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
2579 sidr_req_msg->request_id = cm_id_priv->id.local_id;
97f52eb4 2580 sidr_req_msg->pkey = cpu_to_be16(param->pkey);
2581 sidr_req_msg->service_id = param->service_id;
2582
2583 if (param->private_data && param->private_data_len)
2584 memcpy(sidr_req_msg->private_data, param->private_data,
2585 param->private_data_len);
2586}
2587
2588int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
2589 struct ib_cm_sidr_req_param *param)
2590{
2591 struct cm_id_private *cm_id_priv;
2592 struct ib_mad_send_buf *msg;
2593 struct ib_send_wr *bad_send_wr;
2594 unsigned long flags;
2595 int ret;
2596
2597 if (!param->path || (param->private_data &&
2598 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
2599 return -EINVAL;
2600
2601 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2602 ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
2603 if (ret)
2604 goto out;
2605
2606 cm_id->service_id = param->service_id;
97f52eb4 2607 cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
2608 cm_id_priv->timeout_ms = param->timeout_ms;
2609 cm_id_priv->max_cm_retries = param->max_cm_retries;
2610 ret = cm_alloc_msg(cm_id_priv, &msg);
2611 if (ret)
2612 goto out;
2613
2614 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
2615 param);
2616 msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
2617 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
2618
2619 spin_lock_irqsave(&cm_id_priv->lock, flags);
2620 if (cm_id->state == IB_CM_IDLE)
2621 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2622 &msg->send_wr, &bad_send_wr);
2623 else
2624 ret = -EINVAL;
2625
2626 if (ret) {
2627 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2628 cm_free_msg(msg);
2629 goto out;
2630 }
2631 cm_id->state = IB_CM_SIDR_REQ_SENT;
2632 cm_id_priv->msg = msg;
2633 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2634out:
2635 return ret;
2636}
2637EXPORT_SYMBOL(ib_send_cm_sidr_req);
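/*
 * SIDR (service ID resolution) exchanges resolve a service ID to a
 * remote QPN and Q_Key without establishing a connection; they are
 * typically used to locate unreliable datagram services.
 */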
2638
2639static void cm_format_sidr_req_event(struct cm_work *work,
2640 struct ib_cm_id *listen_id)
2641{
2642 struct cm_sidr_req_msg *sidr_req_msg;
2643 struct ib_cm_sidr_req_event_param *param;
2644
2645 sidr_req_msg = (struct cm_sidr_req_msg *)
2646 work->mad_recv_wc->recv_buf.mad;
2647 param = &work->cm_event.param.sidr_req_rcvd;
97f52eb4 2648 param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
a977049d 2649 param->listen_id = listen_id;
2650 param->port = work->port->port_num;
2651 work->cm_event.private_data = &sidr_req_msg->private_data;
2652}
2653
2654static int cm_sidr_req_handler(struct cm_work *work)
2655{
2656 struct ib_cm_id *cm_id;
2657 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
2658 struct cm_sidr_req_msg *sidr_req_msg;
2659 struct ib_wc *wc;
2660 unsigned long flags;
2661
07d357d0 2662 cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
2663 if (IS_ERR(cm_id))
2664 return PTR_ERR(cm_id);
2665 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2666
2667 /* Record SGID/SLID and request ID for lookup. */
2668 sidr_req_msg = (struct cm_sidr_req_msg *)
2669 work->mad_recv_wc->recv_buf.mad;
2670 wc = work->mad_recv_wc->wc;
97f52eb4 2671 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
2672 cm_id_priv->av.dgid.global.interface_id = 0;
2673 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2674 &cm_id_priv->av);
2675 cm_id_priv->id.remote_id = sidr_req_msg->request_id;
2676 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
2677 cm_id_priv->tid = sidr_req_msg->hdr.tid;
2678 atomic_inc(&cm_id_priv->work_count);
2679
2680 spin_lock_irqsave(&cm.lock, flags);
2681 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
2682 if (cur_cm_id_priv) {
2683 spin_unlock_irqrestore(&cm.lock, flags);
2684 goto out; /* Duplicate message. */
2685 }
2686 cur_cm_id_priv = cm_find_listen(cm_id->device,
2687 sidr_req_msg->service_id);
2688 if (!cur_cm_id_priv) {
2689 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2690 spin_unlock_irqrestore(&cm.lock, flags);
2691 /* todo: reply with no match */
2692 goto out; /* No match. */
2693 }
2694 atomic_inc(&cur_cm_id_priv->refcount);
2695 spin_unlock_irqrestore(&cm.lock, flags);
2696
2697 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
2698 cm_id_priv->id.context = cur_cm_id_priv->id.context;
2699 cm_id_priv->id.service_id = sidr_req_msg->service_id;
97f52eb4 2700 cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
2701
2702 cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
2703 cm_process_work(cm_id_priv, work);
2704 cm_deref_id(cur_cm_id_priv);
2705 return 0;
2706out:
2707 ib_destroy_cm_id(&cm_id_priv->id);
2708 return -EINVAL;
2709}
2710
2711static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
2712 struct cm_id_private *cm_id_priv,
2713 struct ib_cm_sidr_rep_param *param)
2714{
2715 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
2716 cm_id_priv->tid);
2717 sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
2718 sidr_rep_msg->status = param->status;
2719 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
2720 sidr_rep_msg->service_id = cm_id_priv->id.service_id;
2721 sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
2722
2723 if (param->info && param->info_length)
2724 memcpy(sidr_rep_msg->info, param->info, param->info_length);
2725
2726 if (param->private_data && param->private_data_len)
2727 memcpy(sidr_rep_msg->private_data, param->private_data,
2728 param->private_data_len);
2729}
2730
2731int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
2732 struct ib_cm_sidr_rep_param *param)
2733{
2734 struct cm_id_private *cm_id_priv;
2735 struct ib_mad_send_buf *msg;
2736 struct ib_send_wr *bad_send_wr;
2737 unsigned long flags;
2738 int ret;
2739
2740 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
2741 (param->private_data &&
2742 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
2743 return -EINVAL;
2744
2745 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2746 spin_lock_irqsave(&cm_id_priv->lock, flags);
2747 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
2748 ret = -EINVAL;
2749 goto error;
2750 }
2751
2752 ret = cm_alloc_msg(cm_id_priv, &msg);
2753 if (ret)
2754 goto error;
2755
2756 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
2757 param);
2758 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2759 &msg->send_wr, &bad_send_wr);
2760 if (ret) {
2761 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2762 cm_free_msg(msg);
2763 return ret;
2764 }
2765 cm_id->state = IB_CM_IDLE;
2766 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2767
2768 spin_lock_irqsave(&cm.lock, flags);
2769 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2770 spin_unlock_irqrestore(&cm.lock, flags);
2771 return 0;
2772
2773error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2774 return ret;
2775}
2776EXPORT_SYMBOL(ib_send_cm_sidr_rep);
2777
2778static void cm_format_sidr_rep_event(struct cm_work *work)
2779{
2780 struct cm_sidr_rep_msg *sidr_rep_msg;
2781 struct ib_cm_sidr_rep_event_param *param;
2782
2783 sidr_rep_msg = (struct cm_sidr_rep_msg *)
2784 work->mad_recv_wc->recv_buf.mad;
2785 param = &work->cm_event.param.sidr_rep_rcvd;
2786 param->status = sidr_rep_msg->status;
2787 param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
2788 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
2789 param->info = &sidr_rep_msg->info;
2790 param->info_len = sidr_rep_msg->info_length;
2791 work->cm_event.private_data = &sidr_rep_msg->private_data;
2792}
2793
2794static int cm_sidr_rep_handler(struct cm_work *work)
2795{
2796 struct cm_sidr_rep_msg *sidr_rep_msg;
2797 struct cm_id_private *cm_id_priv;
2798 unsigned long flags;
2799
2800 sidr_rep_msg = (struct cm_sidr_rep_msg *)
2801 work->mad_recv_wc->recv_buf.mad;
2802 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
2803 if (!cm_id_priv)
2804 return -EINVAL; /* Unmatched reply. */
2805
2806 spin_lock_irqsave(&cm_id_priv->lock, flags);
2807 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
2808 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2809 goto out;
2810 }
2811 cm_id_priv->id.state = IB_CM_IDLE;
2812 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2813 (unsigned long) cm_id_priv->msg);
2814 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2815
2816 cm_format_sidr_rep_event(work);
2817 cm_process_work(cm_id_priv, work);
2818 return 0;
2819out:
2820 cm_deref_id(cm_id_priv);
2821 return -EINVAL;
2822}
2823
2824static void cm_process_send_error(struct ib_mad_send_buf *msg,
2825 enum ib_wc_status wc_status)
2826{
2827 struct cm_id_private *cm_id_priv;
2828 struct ib_cm_event cm_event;
2829 enum ib_cm_state state;
2830 unsigned long flags;
2831 int ret;
2832
2833 memset(&cm_event, 0, sizeof cm_event);
2834 cm_id_priv = msg->context[0];
2835
2836 /* Discard old sends or ones without a response. */
2837 spin_lock_irqsave(&cm_id_priv->lock, flags);
2838 state = (enum ib_cm_state) (unsigned long) msg->context[1];
2839 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
2840 goto discard;
2841
2842 switch (state) {
2843 case IB_CM_REQ_SENT:
2844 case IB_CM_MRA_REQ_RCVD:
2845 cm_reset_to_idle(cm_id_priv);
2846 cm_event.event = IB_CM_REQ_ERROR;
2847 break;
2848 case IB_CM_REP_SENT:
2849 case IB_CM_MRA_REP_RCVD:
2850 cm_reset_to_idle(cm_id_priv);
2851 cm_event.event = IB_CM_REP_ERROR;
2852 break;
2853 case IB_CM_DREQ_SENT:
2854 cm_enter_timewait(cm_id_priv);
2855 cm_event.event = IB_CM_DREQ_ERROR;
2856 break;
2857 case IB_CM_SIDR_REQ_SENT:
2858 cm_id_priv->id.state = IB_CM_IDLE;
2859 cm_event.event = IB_CM_SIDR_REQ_ERROR;
2860 break;
2861 default:
2862 goto discard;
2863 }
2864 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2865 cm_event.param.send_status = wc_status;
2866
2867 /* No other events can occur on the cm_id at this point. */
2868 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
2869 cm_free_msg(msg);
2870 if (ret)
2871 ib_destroy_cm_id(&cm_id_priv->id);
2872 return;
2873discard:
2874 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2875 cm_free_msg(msg);
2876}
2877
2878static void cm_send_handler(struct ib_mad_agent *mad_agent,
2879 struct ib_mad_send_wc *mad_send_wc)
2880{
2881 struct ib_mad_send_buf *msg;
2882
2883 msg = (struct ib_mad_send_buf *)(unsigned long)mad_send_wc->wr_id;
2884
2885 switch (mad_send_wc->status) {
2886 case IB_WC_SUCCESS:
2887 case IB_WC_WR_FLUSH_ERR:
2888 cm_free_msg(msg);
2889 break;
2890 default:
2891 if (msg->context[0] && msg->context[1])
2892 cm_process_send_error(msg, mad_send_wc->status);
2893 else
2894 cm_free_msg(msg);
2895 break;
2896 }
2897}
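/*
 * Flushed sends (IB_WC_WR_FLUSH_ERR) indicate the MAD QP is being
 * torn down; they are treated like successful completions above and
 * simply freed, since no per-connection recovery is possible then.
 */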
2898
2899static void cm_work_handler(void *data)
2900{
2901 struct cm_work *work = data;
2902 int ret;
2903
2904 switch (work->cm_event.event) {
2905 case IB_CM_REQ_RECEIVED:
2906 ret = cm_req_handler(work);
2907 break;
2908 case IB_CM_MRA_RECEIVED:
2909 ret = cm_mra_handler(work);
2910 break;
2911 case IB_CM_REJ_RECEIVED:
2912 ret = cm_rej_handler(work);
2913 break;
2914 case IB_CM_REP_RECEIVED:
2915 ret = cm_rep_handler(work);
2916 break;
2917 case IB_CM_RTU_RECEIVED:
2918 ret = cm_rtu_handler(work);
2919 break;
2920 case IB_CM_USER_ESTABLISHED:
2921 ret = cm_establish_handler(work);
2922 break;
2923 case IB_CM_DREQ_RECEIVED:
2924 ret = cm_dreq_handler(work);
2925 break;
2926 case IB_CM_DREP_RECEIVED:
2927 ret = cm_drep_handler(work);
2928 break;
2929 case IB_CM_SIDR_REQ_RECEIVED:
2930 ret = cm_sidr_req_handler(work);
2931 break;
2932 case IB_CM_SIDR_REP_RECEIVED:
2933 ret = cm_sidr_rep_handler(work);
2934 break;
2935 case IB_CM_LAP_RECEIVED:
2936 ret = cm_lap_handler(work);
2937 break;
2938 case IB_CM_APR_RECEIVED:
2939 ret = cm_apr_handler(work);
2940 break;
2941 case IB_CM_TIMEWAIT_EXIT:
2942 ret = cm_timewait_handler(work);
2943 break;
2944 default:
2945 ret = -EINVAL;
2946 break;
2947 }
2948 if (ret)
2949 cm_free_work(work);
2950}
2951
2952int ib_cm_establish(struct ib_cm_id *cm_id)
2953{
2954 struct cm_id_private *cm_id_priv;
2955 struct cm_work *work;
2956 unsigned long flags;
2957 int ret = 0;
2958
2959 work = kmalloc(sizeof *work, GFP_ATOMIC);
2960 if (!work)
2961 return -ENOMEM;
2962
2963 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2964 spin_lock_irqsave(&cm_id_priv->lock, flags);
2965 switch (cm_id->state)
2966 {
2967 case IB_CM_REP_SENT:
2968 case IB_CM_MRA_REP_RCVD:
2969 cm_id->state = IB_CM_ESTABLISHED;
2970 break;
2971 case IB_CM_ESTABLISHED:
2972 ret = -EISCONN;
2973 break;
2974 default:
2975 ret = -EINVAL;
2976 break;
2977 }
2978 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2979
2980 if (ret) {
2981 kfree(work);
2982 goto out;
2983 }
2984
2985 /*
2986 * The CM worker thread may try to destroy the cm_id before it
2987 * can execute this work item. To prevent potential deadlock,
2988 * we need to find the cm_id once we're in the context of the
2989 * worker thread, rather than holding a reference on it.
2990 */
2991 INIT_WORK(&work->work, cm_work_handler, work);
2992 work->local_id = cm_id->local_id;
2993 work->remote_id = cm_id->remote_id;
2994 work->mad_recv_wc = NULL;
2995 work->cm_event.event = IB_CM_USER_ESTABLISHED;
2996 queue_work(cm.wq, &work->work);
2997out:
2998 return ret;
2999}
3000EXPORT_SYMBOL(ib_cm_establish);
3001
3002static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3003 struct ib_mad_recv_wc *mad_recv_wc)
3004{
3005 struct cm_work *work;
3006 enum ib_cm_event_type event;
3007 int paths = 0;
3008
3009 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
3010 case CM_REQ_ATTR_ID:
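		/*
		 * A REQ always carries a primary path; when its alternate
		 * local LID is non-zero it carries an alternate path record
		 * as well, and the work item is sized to hold both.
		 */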
3011 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
3012 alt_local_lid != 0);
3013 event = IB_CM_REQ_RECEIVED;
3014 break;
3015 case CM_MRA_ATTR_ID:
3016 event = IB_CM_MRA_RECEIVED;
3017 break;
3018 case CM_REJ_ATTR_ID:
3019 event = IB_CM_REJ_RECEIVED;
3020 break;
3021 case CM_REP_ATTR_ID:
3022 event = IB_CM_REP_RECEIVED;
3023 break;
3024 case CM_RTU_ATTR_ID:
3025 event = IB_CM_RTU_RECEIVED;
3026 break;
3027 case CM_DREQ_ATTR_ID:
3028 event = IB_CM_DREQ_RECEIVED;
3029 break;
3030 case CM_DREP_ATTR_ID:
3031 event = IB_CM_DREP_RECEIVED;
3032 break;
3033 case CM_SIDR_REQ_ATTR_ID:
3034 event = IB_CM_SIDR_REQ_RECEIVED;
3035 break;
3036 case CM_SIDR_REP_ATTR_ID:
3037 event = IB_CM_SIDR_REP_RECEIVED;
3038 break;
3039 case CM_LAP_ATTR_ID:
3040 paths = 1;
3041 event = IB_CM_LAP_RECEIVED;
3042 break;
3043 case CM_APR_ATTR_ID:
3044 event = IB_CM_APR_RECEIVED;
3045 break;
3046 default:
3047 ib_free_recv_mad(mad_recv_wc);
3048 return;
3049 }
3050
3051 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
3052 GFP_KERNEL);
3053 if (!work) {
3054 ib_free_recv_mad(mad_recv_wc);
3055 return;
3056 }
3057
3058 INIT_WORK(&work->work, cm_work_handler, work);
3059 work->cm_event.event = event;
3060 work->mad_recv_wc = mad_recv_wc;
3061 work->port = (struct cm_port *)mad_agent->context;
3062 queue_work(cm.wq, &work->work);
3063}
3064
3065static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3066 struct ib_qp_attr *qp_attr,
3067 int *qp_attr_mask)
3068{
3069 unsigned long flags;
3070 int ret;
3071
3072 spin_lock_irqsave(&cm_id_priv->lock, flags);
3073 switch (cm_id_priv->id.state) {
3074 case IB_CM_REQ_SENT:
3075 case IB_CM_MRA_REQ_RCVD:
3076 case IB_CM_REQ_RCVD:
3077 case IB_CM_MRA_REQ_SENT:
3078 case IB_CM_REP_RCVD:
3079 case IB_CM_MRA_REP_SENT:
3080 case IB_CM_REP_SENT:
3081 case IB_CM_MRA_REP_RCVD:
3082 case IB_CM_ESTABLISHED:
3083 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3084 IB_QP_PKEY_INDEX | IB_QP_PORT;
3085 qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
3086 IB_ACCESS_REMOTE_WRITE;
a977049d 3087 if (cm_id_priv->responder_resources)
ae7971a7 3088 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
3089 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3090 qp_attr->port_num = cm_id_priv->av.port->port_num;
3091 ret = 0;
3092 break;
3093 default:
3094 ret = -EINVAL;
3095 break;
3096 }
3097 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3098 return ret;
3099}
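/*
 * Note: local and remote write access are always enabled above, but
 * IB_ACCESS_REMOTE_READ is granted only when the peer requested
 * responder resources during the REQ/REP exchange.
 */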
3100
3101static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3102 struct ib_qp_attr *qp_attr,
3103 int *qp_attr_mask)
3104{
3105 unsigned long flags;
3106 int ret;
3107
3108 spin_lock_irqsave(&cm_id_priv->lock, flags);
3109 switch (cm_id_priv->id.state) {
3110 case IB_CM_REQ_RCVD:
3111 case IB_CM_MRA_REQ_SENT:
3112 case IB_CM_REP_RCVD:
3113 case IB_CM_MRA_REP_SENT:
3114 case IB_CM_REP_SENT:
3115 case IB_CM_MRA_REP_RCVD:
3116 case IB_CM_ESTABLISHED:
3117 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
ae7971a7 3118 IB_QP_DEST_QPN | IB_QP_RQ_PSN;
3119 qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3120 qp_attr->path_mtu = cm_id_priv->path_mtu;
3121 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3122 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3123 if (cm_id_priv->qp_type == IB_QPT_RC) {
3124 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
3125 IB_QP_MIN_RNR_TIMER;
3126 qp_attr->max_dest_rd_atomic =
3127 cm_id_priv->responder_resources;
3128 qp_attr->min_rnr_timer = 0;
3129 }
3130 if (cm_id_priv->alt_av.ah_attr.dlid) {
3131 *qp_attr_mask |= IB_QP_ALT_PATH;
3132 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3133 }
3134 ret = 0;
3135 break;
3136 default:
3137 ret = -EINVAL;
3138 break;
3139 }
3140 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3141 return ret;
3142}
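/*
 * The qp_type checks above and below keep the RC-only attributes
 * (RD atomic limits, RNR and retry timers) out of the attribute
 * mask for UC QPs, which would otherwise reject them in
 * ib_modify_qp().
 */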
3143
3144static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3145 struct ib_qp_attr *qp_attr,
3146 int *qp_attr_mask)
3147{
3148 unsigned long flags;
3149 int ret;
3150
3151 spin_lock_irqsave(&cm_id_priv->lock, flags);
3152 switch (cm_id_priv->id.state) {
3153 case IB_CM_REP_RCVD:
3154 case IB_CM_MRA_REP_SENT:
3155 case IB_CM_REP_SENT:
3156 case IB_CM_MRA_REP_RCVD:
3157 case IB_CM_ESTABLISHED:
ae7971a7 3158 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
a977049d 3159 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3160 if (cm_id_priv->qp_type == IB_QPT_RC) {
3161 *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
3162 IB_QP_RNR_RETRY |
3163 IB_QP_MAX_QP_RD_ATOMIC;
3164 qp_attr->timeout = cm_id_priv->local_ack_timeout;
3165 qp_attr->retry_cnt = cm_id_priv->retry_count;
3166 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3167 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
3168 }
3169 if (cm_id_priv->alt_av.ah_attr.dlid) {
3170 *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3171 qp_attr->path_mig_state = IB_MIG_REARM;
3172 }
3173 ret = 0;
3174 break;
3175 default:
3176 ret = -EINVAL;
3177 break;
3178 }
3179 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3180 return ret;
3181}
3182
3183int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3184 struct ib_qp_attr *qp_attr,
3185 int *qp_attr_mask)
3186{
3187 struct cm_id_private *cm_id_priv;
3188 int ret;
3189
3190 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3191 switch (qp_attr->qp_state) {
3192 case IB_QPS_INIT:
3193 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
3194 break;
3195 case IB_QPS_RTR:
3196 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
3197 break;
3198 case IB_QPS_RTS:
3199 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
3200 break;
3201 default:
3202 ret = -EINVAL;
3203 break;
3204 }
3205 return ret;
3206}
3207EXPORT_SYMBOL(ib_cm_init_qp_attr);
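/*
 * Hypothetical usage sketch (not part of the original file): a ULP
 * typically walks its QP through INIT, RTR and RTS by letting the CM
 * fill in the state-specific attributes for each step.  The helper
 * name below is invented for illustration.
 */
#if 0
static int example_modify_qp(struct ib_cm_id *cm_id, struct ib_qp *qp,
			     enum ib_qp_state state)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask;
	int ret;

	qp_attr.qp_state = state;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;
	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
#endif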
3208
97f52eb4 3209static __be64 cm_get_ca_guid(struct ib_device *device)
3210{
3211 struct ib_device_attr *device_attr;
97f52eb4 3212 __be64 guid;
3213 int ret;
3214
3215 device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
3216 if (!device_attr)
3217 return 0;
3218
3219 ret = ib_query_device(device, device_attr);
3220 guid = ret ? 0 : device_attr->node_guid;
3221 kfree(device_attr);
3222 return guid;
3223}
3224
3225static void cm_add_one(struct ib_device *device)
3226{
3227 struct cm_device *cm_dev;
3228 struct cm_port *port;
3229 struct ib_mad_reg_req reg_req = {
3230 .mgmt_class = IB_MGMT_CLASS_CM,
3231 .mgmt_class_version = IB_CM_CLASS_VERSION
3232 };
3233 struct ib_port_modify port_modify = {
3234 .set_port_cap_mask = IB_PORT_CM_SUP
3235 };
3236 unsigned long flags;
3237 int ret;
3238 u8 i;
3239
3240 cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
3241 device->phys_port_cnt, GFP_KERNEL);
3242 if (!cm_dev)
3243 return;
3244
3245 cm_dev->device = device;
3246 cm_dev->ca_guid = cm_get_ca_guid(device);
3247 if (!cm_dev->ca_guid)
3248 goto error1;
3249
3250 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
3251 for (i = 1; i <= device->phys_port_cnt; i++) {
3252 port = &cm_dev->port[i-1];
3253 port->cm_dev = cm_dev;
3254 port->port_num = i;
3255 port->mad_agent = ib_register_mad_agent(device, i,
3256 IB_QPT_GSI,
3257 &reg_req,
3258 0,
3259 cm_send_handler,
3260 cm_recv_handler,
3261 port);
3262 if (IS_ERR(port->mad_agent))
3263 goto error2;
3264
3265 ret = ib_modify_port(device, i, 0, &port_modify);
3266 if (ret)
3267 goto error3;
3268 }
3269 ib_set_client_data(device, &cm_client, cm_dev);
3270
3271 write_lock_irqsave(&cm.device_lock, flags);
3272 list_add_tail(&cm_dev->list, &cm.device_list);
3273 write_unlock_irqrestore(&cm.device_lock, flags);
3274 return;
3275
3276error3:
3277 ib_unregister_mad_agent(port->mad_agent);
3278error2:
3279 port_modify.set_port_cap_mask = 0;
3280 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
3281 while (--i) {
3282 port = &cm_dev->port[i-1];
3283 ib_modify_port(device, port->port_num, 0, &port_modify);
3284 ib_unregister_mad_agent(port->mad_agent);
3285 }
3286error1:
3287 kfree(cm_dev);
3288}
3289
3290static void cm_remove_one(struct ib_device *device)
3291{
3292 struct cm_device *cm_dev;
3293 struct cm_port *port;
3294 struct ib_port_modify port_modify = {
3295 .clr_port_cap_mask = IB_PORT_CM_SUP
3296 };
3297 unsigned long flags;
3298 int i;
3299
3300 cm_dev = ib_get_client_data(device, &cm_client);
3301 if (!cm_dev)
3302 return;
3303
3304 write_lock_irqsave(&cm.device_lock, flags);
3305 list_del(&cm_dev->list);
3306 write_unlock_irqrestore(&cm.device_lock, flags);
3307
3308 for (i = 1; i <= device->phys_port_cnt; i++) {
3309 port = &cm_dev->port[i-1];
3310 ib_modify_port(device, port->port_num, 0, &port_modify);
3311 ib_unregister_mad_agent(port->mad_agent);
3312 }
3313 kfree(cm_dev);
3314}
3315
3316static int __init ib_cm_init(void)
3317{
3318 int ret;
3319
3320 memset(&cm, 0, sizeof cm);
3321 INIT_LIST_HEAD(&cm.device_list);
3322 rwlock_init(&cm.device_lock);
3323 spin_lock_init(&cm.lock);
3324 cm.listen_service_table = RB_ROOT;
3325 cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
3326 cm.remote_id_table = RB_ROOT;
3327 cm.remote_qp_table = RB_ROOT;
3328 cm.remote_sidr_table = RB_ROOT;
3329 idr_init(&cm.local_id_table);
3330 idr_pre_get(&cm.local_id_table, GFP_KERNEL);
3331
3332 cm.wq = create_workqueue("ib_cm");
3333 if (!cm.wq)
3334 return -ENOMEM;
3335
3336 ret = ib_register_client(&cm_client);
3337 if (ret)
3338 goto error;
3339
3340 return 0;
3341error:
3342 destroy_workqueue(cm.wq);
3343 return ret;
3344}
3345
3346static void __exit ib_cm_cleanup(void)
3347{
3348 flush_workqueue(cm.wq);
3349 destroy_workqueue(cm.wq);
3350 ib_unregister_client(&cm_client);
5d7edb3c 3351 idr_destroy(&cm.local_id_table);
3352}
3353
3354module_init(ib_cm_init);
3355module_exit(ib_cm_cleanup);
3356