/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};
static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	struct workqueue_struct *wq;
} cm;
struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
};

struct cm_device {
	struct list_head list;
	struct ib_device *device;
	__be64 ca_guid;
	struct cm_port port[0];
};
struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 packet_life_time;
};
struct cm_work {
	struct work_struct work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};
struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};
struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;
	wait_queue_head_t wait;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 local_ack_timeout;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;

	struct list_head work_list;
	atomic_t work_count;
};
static void cm_work_handler(void *data);
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		wake_up(&cm_id_priv->wait);
}
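/*
 * Lifetime of a cm_id_private: the id starts with refcount == 1; message
 * sends and lookups take additional references.  ib_destroy_cm_id() drops
 * the initial reference and sleeps on 'wait' until the count reaches zero,
 * which cm_deref_id() signals via wake_up().
 */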
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}
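/*
 * A message allocated above pins its cm_id_priv through context[0];
 * cm_free_msg() below is the matching release point.  Response messages
 * built by cm_alloc_response_msg() leave context[0] unset, so cm_free_msg()
 * only drops a reference when one was actually taken.
 */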
static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}
static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmalloc(private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	memcpy(data, private_data, private_data_len);
	return data;
}
static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}
static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
			   u16 dlid, u8 sl, u16 src_path_bits)
{
	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = dlid;
	ah_attr->sl = sl;
	ah_attr->src_path_bits = src_path_bits;
	ah_attr->port_num = port_num;
}
static void cm_init_av_for_response(struct cm_port *port,
				    struct ib_wc *wc, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid,
		       wc->sl, wc->dlid_path_bits);
}
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
					&p, NULL)) {
			port = &cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	cm_set_ah_attr(&av->ah_attr, av->port->port_num,
		       be16_to_cpu(path->dlid), path->sl,
		       be16_to_cpu(path->slid) & 0x7F);
	av->packet_life_time = path->packet_life_time;
	return 0;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
					(__force int *) &cm_id_priv->id.local_id);
		spin_unlock_irqrestore(&cm.lock, flags);
	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
	return ret;
}
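/*
 * idr_get_new_above() fails with -EAGAIN when its preallocated nodes are
 * exhausted; idr_pre_get() replenishes them with GFP_KERNEL outside the
 * spinlock, so the loop retries until an id is allocated or preallocation
 * itself fails.
 */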
static void cm_free_id(__be32 local_id)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	idr_remove(&cm.local_id_table, (__force int) local_id);
	spin_unlock_irqrestore(&cm.lock, flags);
}
static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}
static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irqrestore(&cm.lock, flags);

	return cm_id_priv;
}
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}
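/*
 * Listens are keyed by (device, service_id) with device as the major key.
 * The mask test above treats an existing entry as a match when the masked
 * service ids coincide on the same device, so overlapping listens on one
 * device are refused by returning the current owner to the caller.
 */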
static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device))
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}
static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}
static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}
static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}
static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}
struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_waitqueue_head(&cm_id_priv->wait);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);
static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}
static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}
static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}
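/*
 * Worked example: for iba_time = 20 the exact value is
 * 4.096us * 2^20 ~= 4.29s, while the shift gives
 * 1 << (20 - 8) = 4096ms ~= 4.1s.  The approximation treats 1ms as
 * 2^20 ns and so under-reports by roughly 5%, which is fine for these
 * timeout purposes.
 */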
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	unsigned long flags;

	if (!timewait_info->inserted_remote_id &&
	    !timewait_info->inserted_remote_qp)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
}
static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_WORK(&timewait_info->work.work, cm_work_handler,
		  &timewait_info->work);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}
static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}
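/*
 * The wait time is derived from local_ack_timeout, which the REQ/REP
 * exchange set from the primary path's packet lifetime, so the id lingers
 * long enough for stale packets on that path to drain before the
 * IB_CM_TIMEWAIT_EXIT event fires.
 */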
static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}
void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		spin_lock_irqsave(&cm.lock, flags);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->av.port->cm_dev->ca_guid,
			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
			       NULL, 0);
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	}

	cm_free_id(cm_id->local_id);
	atomic_dec(&cm_id_priv->refcount);
	wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
int ib_cm_listen(struct ib_cm_id *cm_id,
		 __be64 service_id,
		 __be64 service_mask)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask :
		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	BUG_ON(cm_id->state != IB_CM_IDLE);

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
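/*
 * Typical consumer usage, as a sketch (my_handler, my_service_id and
 * my_context below are illustrative names, not part of this file):
 *
 *	cm_id = ib_create_cm_id(device, my_handler, my_context);
 *	if (!IS_ERR(cm_id))
 *		ret = ib_cm_listen(cm_id, my_service_id, 0);
 *
 * A zero service_mask is promoted to all-ones above, giving an exact
 * match on my_service_id; incoming REQs are then delivered to my_handler
 * as IB_CM_REQ_RECEIVED events on a newly created id.
 */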
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
			  (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}
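/*
 * TID layout: the upper 32 bits carry the MAD agent's hi_tid so responses
 * route back to this agent; the lower 32 bits pack the local comm id with
 * the message sequence type in the top two bits, keeping concurrent
 * exchanges (e.g. REQ vs. DREQ) on one id distinguishable.
 */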
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}
static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		min(31, param->primary_path->packet_life_time + 1));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			min(31, param->alternate_path->packet_life_time + 1));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}
static inline int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}
static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}
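/*
 * (CA GUID, QPN) gives every endpoint a total order, so exactly one side
 * of a simultaneous connect compares as "active"; this is the standard
 * tie-break for the peer-to-peer case that this file otherwise still
 * marks as a todo.
 */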
static inline void cm_format_paths_from_req(struct cm_req_msg *req_msg,
					    struct ib_sa_path_rec *primary_path,
					    struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}
*work
,
1055 struct cm_id_private
*cm_id_priv
,
1056 struct ib_cm_id
*listen_id
)
1058 struct cm_req_msg
*req_msg
;
1059 struct ib_cm_req_event_param
*param
;
1061 req_msg
= (struct cm_req_msg
*)work
->mad_recv_wc
->recv_buf
.mad
;
1062 param
= &work
->cm_event
.param
.req_rcvd
;
1063 param
->listen_id
= listen_id
;
1064 param
->port
= cm_id_priv
->av
.port
->port_num
;
1065 param
->primary_path
= &work
->path
[0];
1066 if (req_msg
->alt_local_lid
)
1067 param
->alternate_path
= &work
->path
[1];
1069 param
->alternate_path
= NULL
;
1070 param
->remote_ca_guid
= req_msg
->local_ca_guid
;
1071 param
->remote_qkey
= be32_to_cpu(req_msg
->local_qkey
);
1072 param
->remote_qpn
= be32_to_cpu(cm_req_get_local_qpn(req_msg
));
1073 param
->qp_type
= cm_req_get_qp_type(req_msg
);
1074 param
->starting_psn
= be32_to_cpu(cm_req_get_starting_psn(req_msg
));
1075 param
->responder_resources
= cm_req_get_init_depth(req_msg
);
1076 param
->initiator_depth
= cm_req_get_resp_res(req_msg
);
1077 param
->local_cm_response_timeout
=
1078 cm_req_get_remote_resp_timeout(req_msg
);
1079 param
->flow_control
= cm_req_get_flow_ctrl(req_msg
);
1080 param
->remote_cm_response_timeout
=
1081 cm_req_get_local_resp_timeout(req_msg
);
1082 param
->retry_count
= cm_req_get_retry_count(req_msg
);
1083 param
->rnr_retry_count
= cm_req_get_rnr_retry_count(req_msg
);
1084 param
->srq
= cm_req_get_srq(req_msg
);
1085 work
->cm_event
.private_data
= &req_msg
->private_data
;
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	unsigned long flags;
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
}
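/*
 * work_count starts at -1, so atomic_inc_and_test() in the receive
 * handlers returns true only for the first pending event; later events
 * queue on work_list and drain through the loop above, serializing all
 * callbacks for one id without holding its lock across the handler.
 */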
static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}
static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}
static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
}
static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	unsigned long flags;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for duplicate REQ and stale connections. */
	spin_lock_irqsave(&cm.lock, flags);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (!timewait_info)
		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irqrestore(&cm.lock, flags);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		} else
			cm_issue_rej(work->port, work->mad_recv_wc,
				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
				     NULL, 0);
		goto error;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id);
	if (!listen_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto error;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irqrestore(&cm.lock, flags);
	return listen_cm_id_priv;

error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
	return NULL;
}
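/*
 * A REQ whose (remote CA GUID, comm id) or (remote CA GUID, QPN) already
 * sits in the timewait trees is either a retransmission (the old id is
 * still live and gets a duplicate-REQ reply) or a stale connection (the
 * old id is gone and the sender is told IB_CM_REJ_STALE_CONN).
 */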
static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto error1;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		goto error2;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret)
		goto error3;
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret)
			goto error3;
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

error3:	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
error2:	kfree(cm_id_priv->timewait_info);
	cm_id_priv->timewait_info = NULL;
error1:	ib_destroy_cm_id(&cm_id_priv->id);
	return ret;
}
static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);
static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);
static void cm_format_rep_event(struct cm_work *work)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}
static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}
static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	spin_lock_irqsave(&cm.lock, flags);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock_irqrestore(&cm.lock, flags);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_format_rep_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}
	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
	cm_deref_id(cm_id_priv);
	return ret;
}
static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	/* See comment in ib_cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	unsigned long flags;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);
static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &dreq_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	unsigned long flags;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);
static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
}
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irqsave(&cm.lock, flags);
		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
						  remote_id);
		if (!timewait_info) {
			spin_unlock_irqrestore(&cm.lock, flags);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table,
				      (__force int) timewait_info->work.local_id);
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irqrestore(&cm.lock, flags);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}
static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	unsigned long flags;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_ESTABLISHED:
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
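
/*
 * MRA (Message Receipt Acknowledgment): sent when the local consumer needs
 * more time to respond to a REQ, REP, or LAP, so that the peer extends its
 * timeout instead of retrying or failing the connection.
 */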
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REQ_SENT;
		break;
	case IB_CM_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REP_SENT;
		break;
	case IB_CM_ESTABLISHED:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
		break;
	default:
		ret = -EINVAL;
		goto error1;
	}
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);
static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	unsigned long flags;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
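
/*
 * LAP (Load Alternate Path) / APR (Alternate Path Response): the CM
 * messages used for automatic path migration.  The active side proposes
 * an alternate path with a LAP; the passive side accepts or rejects it
 * with an APR.
 */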
static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	cm_lap_set_local_ack_timeout(lap_msg,
		min(31, alternate_path->packet_life_time + 1));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    cm_id->lap_state != IB_CM_LAP_IDLE) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);
static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	/* pkey is same as in REQ */
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	/* mtu is same as in REQ */
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);
static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	unsigned long flags;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
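
/*
 * After a disconnect the connection lingers in timewait so that stale
 * messages for the old QPN/communication ID are not mistaken for a new
 * connection.  This handler retires the ID to IDLE once the timewait
 * period expires.
 */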
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	cm_cleanup_timewait(timewait_info);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
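
/*
 * SIDR (Service ID Resolution): a lightweight request/reply exchange that
 * resolves a service ID to a (QPN, Q_Key) pair for unreliable datagram
 * traffic, without establishing a connection.
 */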
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = cpu_to_be16(param->pkey);
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	unsigned long flags;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irqsave(&cm.lock, flags);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out; /* Duplicate message. */
	}
	cur_cm_id_priv = cm_find_listen(cm_id->device,
					sidr_req_msg->service_id);
	if (!cur_cm_id_priv) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		/* todo: reply with no match */
		goto out; /* No match. */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}
static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}
static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
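
/*
 * A failed or timed-out send is mapped to the matching *_ERROR event
 * (REQ/REP/DREQ/SIDR_REQ) and reported to the consumer, but only if the
 * message is still the current outstanding send for this cm_id.
 */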
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	unsigned long flags;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_free_msg(msg);
}
static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		/* Only report errors for sends that expect a response. */
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}
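
/*
 * All deferred CM events funnel through this workqueue handler, which
 * dispatches on the event type.  Handlers return non-zero when the work
 * item was not consumed and must be freed here.
 */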
static void cm_work_handler(void *data)
{
	struct cm_work *work = data;
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}
int ib_cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_WORK(&work->work, cm_work_handler, work);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_work(cm.wq, &work->work);
out:
	return ret;
}
EXPORT_SYMBOL(ib_cm_establish);
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_work *work;
	enum ib_cm_event_type event;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_WORK(&work->work, cm_work_handler, work);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = (struct cm_port *)mad_agent->context;
	queue_work(cm.wq, &work->work);
}
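
/*
 * The three helpers below fill in the QP attributes a consumer needs to
 * transition its QP through INIT -> RTR -> RTS for the connection state
 * held by this cm_id; *qp_attr_mask reports which fields were set.
 */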
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
					   IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
					 IB_QP_RNR_RETRY |
					 IB_QP_MAX_QP_RD_ATOMIC;
			qp_attr->timeout = cm_id_priv->local_ack_timeout;
			qp_attr->retry_cnt = cm_id_priv->retry_count;
			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
static __be64 cm_get_ca_guid(struct ib_device *device)
{
	struct ib_device_attr *device_attr;
	__be64 guid;
	int ret;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr)
		return 0;

	ret = ib_query_device(device, device_attr);
	guid = ret ? 0 : device_attr->node_guid;
	kfree(device_attr);
	return guid;
}
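
/*
 * Per-device setup: register a CM MAD agent on every physical port and
 * advertise CM support in the port capability mask; on any failure, undo
 * the ports brought up so far.
 */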
static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;
	cm_dev->ca_guid = cm_get_ca_guid(device);
	if (!cm_dev->ca_guid)
		goto error1;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error2;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error3;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
error1:
	kfree(cm_dev);
}
static void cm_remove_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}
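
/*
 * Module init: the workqueue must exist before the client is registered,
 * since cm_add_one() can start delivering MADs into cm.wq as soon as
 * devices are bound to the client.
 */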
static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error;

	return 0;
error:
	destroy_workqueue(cm.wq);
	return ret;
}
static void __exit ib_cm_cleanup(void)
{
	flush_workqueue(cm.wq);
	destroy_workqueue(cm.wq);
	ib_unregister_client(&cm_client);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);